| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
| stringlengths 7-79 | stringlengths 4-179 | stringlengths 1-3 | stringlengths 4-6 | stringlengths 959-798k | stringclasses 15 values |
glennq/scikit-learn | sklearn/utils/tests/test_fixes.py | copies: 8 | size: 2324
# Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import pickle
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
from sklearn.utils.fixes import MaskedArray
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
# http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
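# Illustrative sketch (not part of the original test module): the stable
# formulation the test above relies on. Writing the logistic in terms of
# exp(-|x|) keeps the exponent non-positive, so neither branch can overflow;
# the helper name is an assumption for this example only.
def _stable_expit_sketch(x):
    x = np.asarray(x, dtype=float)
    e = np.exp(-np.abs(x))  # always in (0, 1], never overflows
    return np.where(x >= 0, 1. / (1. + e), e / (1. + e))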
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
def test_masked_array_obj_dtype_pickleable():
marr = MaskedArray([1, None, 'a'], dtype=object)
for mask in (True, False, [0, 1, 0]):
marr.mask = mask
marr_pickled = pickle.loads(pickle.dumps(marr))
assert_array_equal(marr.data, marr_pickled.data)
assert_array_equal(marr.mask, marr_pickled.mask)
license: bsd-3-clause

eclee25/flu-SDI-exploratory-age | scripts/create_fluseverity_figs_v5/ILINet_corrCoef_2wkPeriod_v5.py | copies: 1 | size: 3936
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 1/15/15
###Function: correlation coefficient between benchmark and zRR vs. moving 2 week window for SDI data for 7 week fall baseline
###Import data: SQL_export/OR_allweeks_outpatient.csv, SQL_export/totalpop.csv, My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv
###Command Line: python S_corrCoef_2wkPeriod_v5.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
import matplotlib.pyplot as plt
from scipy.stats import pearsonr
import numpy as np
import random as rnd
from collections import defaultdict
## local modules ##
import functions_v5 as fxn
rnd.seed(10)
### data structures ###
### functions ###
### data files ###
incidin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/all_cdc_source_data.csv','r')
incidin.readline() # remove header
incid = csv.reader(incidin, delimiter=',')
popin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Census/Import_Data/totalpop_age_Census_98-14.csv', 'r')
pop = csv.reader(popin, delimiter=',')
ixin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/Import_Data/cdc_severity_index_long.csv','r')
ixin.readline()
ix = csv.reader(ixin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
fw = fxn.gp_fluweeks
sl = fxn.gp_seasonlabels
colvec = fxn.gp_colors
wklab = fxn.gp_weeklabels
norm = fxn.gp_normweeks
fs = 24
fssml = 16
lw = fxn.gp_linewidth
# custom xticks for window period
wk1 = range(40,54) + range(1,39)
first_wk = [('0'+str(wk))[-2:] for wk in wk1]
wk2 = range(41,54) + range(1,40)
sec_wk = [('0'+str(wk))[-2:] for wk in wk2]
window_xticks = [fir+sec for fir, sec in zip(first_wk, sec_wk)]
nswaps = 250
### program ###
# import benchmark
# d_benchmark[seasonnum] = CDC benchmark index value
d_benchmark = fxn.benchmark_import(ix, 8) # no ILINet
benchmarks = [d_benchmark[s] for s in ps]
###################################
### 7 week fall baseline ###
d_wk, d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season = fxn.ILINet_week_RR_processing(incid, pop)
d_totIncid53ls, d_totIncidAdj53ls, d_RR53ls, d_zRR53ls = fxn.week_RR_processing_part2(d_pop, d_totILI53ls, d_totILIadj53ls, d_ageILIadj_season)
# preparation of values for Pearson R calculation
d_window_zRRma = fxn.zRR_movingAverage_windows(d_zRR53ls, 2)
# calculate Pearson's correlation coefficient between zRR moving average and benchmark for each window period
benchmark_zRRma_corr = [pearsonr(d_window_zRRma[w], benchmarks)[0] for w in sorted(d_window_zRRma)]
print [np.mean(d_zRR53ls[s][:2]) for s in ps]
print d_window_zRRma[0]
print benchmarks
# create null hypothesis through shuffling
dict_iter_nullCorr = defaultdict(list)
for i in range(nswaps):
null_corr = [pearsonr(fxn.returnShuffled(d_window_zRRma[w][:]), benchmarks)[0] for w in sorted(d_window_zRRma)] # create list copy to shuffle
dict_iter_nullCorr[i] = null_corr
fig1 = plt.figure()
ax1 = fig1.add_subplot(1,1,1)
for i in range(nswaps):
ax1.plot(range(52), dict_iter_nullCorr[i], color='grey', alpha=0.4, linewidth=1) # null line
ax1.plot(range(7), benchmark_zRRma_corr[:7], marker='o', color='black', alpha=0.4, linestyle='solid', linewidth=lw)
ax1.plot(range(6, 52), benchmark_zRRma_corr[6:], marker='o', color='black', linestyle='solid', linewidth=lw)
ax1.set_ylabel(r'Pearson R: $\beta$ & $\sigma(t)$ (2-wk mean)', fontsize=fs)
ax1.set_xlabel('Window Period', fontsize=fs)
plt.xticks(range(52)[::5], window_xticks[::5])
ax1.set_xlim([0,fw])
ax1.set_ylim([-1.0,1.0])
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v5/exploratory/ILINet_corrCoef_window_fallBL_wNull.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
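# Illustrative sketch (not part of the original script): the correlation loop
# above boils down to a Pearson correlation, across seasons, between a 2-week
# moving average of zRR and the CDC benchmark, repeated for every window start
# week. The helper restates that with plain numpy/scipy; the argument names
# and shapes are assumptions for this example only.
def _sketch_window_corr(zrr_by_season, benchmark_by_season, window=2):
    # zrr_by_season: (n_seasons, n_weeks) array; benchmark_by_season: (n_seasons,)
    zrr = np.asarray(zrr_by_season, dtype=float)
    bench = np.asarray(benchmark_by_season, dtype=float)
    n_weeks = zrr.shape[1]
    return [pearsonr(zrr[:, w:w + window].mean(axis=1), bench)[0]
            for w in range(n_weeks - window + 1)]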
license: mit

anntzer/scikit-learn | sklearn/neighbors/_graph.py | copies: 8 | size: 21251
"""Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
# Tom Dupre la Tour
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
from ._base import KNeighborsMixin, RadiusNeighborsMixin
from ._base import NeighborsBase
from ._unsupervised import NearestNeighbors
from ..base import TransformerMixin
from ..utils.validation import check_is_fitted, _deprecate_positional_args
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
if include_self == 'auto':
include_self = mode == 'connectivity'
# it does not include each sample as its own neighbor
if not include_self:
X = None
return X
@_deprecate_positional_args
def kneighbors_graph(X, n_neighbors, *, mode='connectivity',
metric='minkowski', p=2, metric_params=None,
include_self=False, n_jobs=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like of shape (n_samples, n_features) or BallTree
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : str, default='minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
additional keyword arguments for the metric function.
include_self : bool or 'auto', default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If 'auto', then True is used for mode='connectivity' and False
for mode='distance'.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse matrix of shape (n_samples, n_samples)
Graph where A[i, j] is assigned the weight of edge that
connects i to j. The matrix is of CSR format.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 1.],
[1., 0., 1.]])
See Also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors=n_neighbors, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X._fit_X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
@_deprecate_positional_args
def radius_neighbors_graph(X, radius, *, mode='connectivity',
metric='minkowski', p=2, metric_params=None,
include_self=False, n_jobs=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like of shape (n_samples, n_features) or BallTree
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
metric : str, default='minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the p param equal to 2.)
p : int, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
additional keyword arguments for the metric function.
include_self : bool or 'auto', default=False
Whether or not to mark each sample as the first nearest neighbor to
itself. If 'auto', then True is used for mode='connectivity' and False
for mode='distance'.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse matrix of shape (n_samples, n_samples)
Graph where A[i, j] is assigned the weight of edge that connects
i to j. The matrix is of CSR format.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5, mode='connectivity',
... include_self=True)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 0.],
[1., 0., 1.]])
See Also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X._fit_X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
class KNeighborsTransformer(KNeighborsMixin,
TransformerMixin,
NeighborsBase):
"""Transform X into a (weighted) graph of k nearest neighbors
The transformed data is a sparse graph as returned by kneighbors_graph.
Read more in the :ref:`User Guide <neighbors_transformer>`.
.. versionadded:: 0.22
Parameters
----------
mode : {'distance', 'connectivity'}, default='distance'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
n_neighbors : int, default=5
Number of neighbors for each sample in the transformed sparse graph.
For compatibility reasons, as each sample is considered as its own
neighbor, one extra neighbor will be computed when mode == 'distance'.
In this case, the sparse graph contains (n_neighbors + 1) neighbors.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : str or callable, default='minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : int, default=2
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=1
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
effective_metric_ : str or callable
The distance metric used. It will be the same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter is set to
'minkowski' and the `p` parameter to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
it will be the same as the `metric_params` parameter, but may also contain
the `p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_samples_fit_ : int
Number of samples in the fitted data.
Examples
--------
>>> from sklearn.manifold import Isomap
>>> from sklearn.neighbors import KNeighborsTransformer
>>> from sklearn.pipeline import make_pipeline
>>> estimator = make_pipeline(
... KNeighborsTransformer(n_neighbors=5, mode='distance'),
... Isomap(neighbors_algorithm='precomputed'))
"""
@_deprecate_positional_args
def __init__(self, *, mode='distance', n_neighbors=5, algorithm='auto',
leaf_size=30, metric='minkowski', p=2, metric_params=None,
n_jobs=1):
super(KNeighborsTransformer, self).__init__(
n_neighbors=n_neighbors, radius=None, algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs)
self.mode = mode
def fit(self, X, y=None):
"""Fit the k-nearest neighbors transformer from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
Returns
-------
self : KNeighborsTransformer
The fitted k-nearest neighbors transformer.
"""
return self._fit(X)
def transform(self, X):
"""Computes the (weighted) graph of Neighbors for points in X
Parameters
----------
X : array-like of shape (n_samples_transform, n_features)
Sample data.
Returns
-------
Xt : sparse matrix of shape (n_samples_transform, n_samples_fit)
Xt[i, j] is assigned the weight of edge that connects i to j.
Only the neighbors have an explicit value.
The diagonal is always explicit.
The matrix is of CSR format.
"""
check_is_fitted(self)
add_one = self.mode == 'distance'
return self.kneighbors_graph(X, mode=self.mode,
n_neighbors=self.n_neighbors + add_one)
def fit_transform(self, X, y=None):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
y : ignored
Returns
-------
Xt : sparse matrix of shape (n_samples, n_samples)
Xt[i, j] is assigned the weight of edge that connects i to j.
Only the neighbors have an explicit value.
The diagonal is always explicit.
The matrix is of CSR format.
"""
return self.fit(X).transform(X)
def _more_tags(self):
return {
'_xfail_checks': {
'check_methods_sample_order_invariance':
'check is not applicable.'
}
}
class RadiusNeighborsTransformer(RadiusNeighborsMixin,
TransformerMixin,
NeighborsBase):
"""Transform X into a (weighted) graph of neighbors nearer than a radius
The transformed data is a sparse graph as returned by
radius_neighbors_graph.
Read more in the :ref:`User Guide <neighbors_transformer>`.
.. versionadded:: 0.22
Parameters
----------
mode : {'distance', 'connectivity'}, default='distance'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
radius : float, default=1.
Radius of neighborhood in the transformed sparse graph.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : str or callable, default='minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
p : int, default=2
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=1
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
effective_metric_ : str or callable
The distance metric used. It will be the same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter is set to
'minkowski' and the `p` parameter to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
it will be the same as the `metric_params` parameter, but may also contain
the `p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_samples_fit_ : int
Number of samples in the fitted data.
Examples
--------
>>> from sklearn.cluster import DBSCAN
>>> from sklearn.neighbors import RadiusNeighborsTransformer
>>> from sklearn.pipeline import make_pipeline
>>> estimator = make_pipeline(
... RadiusNeighborsTransformer(radius=42.0, mode='distance'),
... DBSCAN(min_samples=30, metric='precomputed'))
"""
@_deprecate_positional_args
def __init__(self, *, mode='distance', radius=1., algorithm='auto',
leaf_size=30, metric='minkowski', p=2, metric_params=None,
n_jobs=1):
super(RadiusNeighborsTransformer, self).__init__(
n_neighbors=None, radius=radius, algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs)
self.mode = mode
def fit(self, X, y=None):
"""Fit the radius neighbors transformer from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
Returns
-------
self : RadiusNeighborsTransformer
The fitted radius neighbors transformer.
"""
return self._fit(X)
def transform(self, X):
"""Computes the (weighted) graph of Neighbors for points in X
Parameters
----------
X : array-like of shape (n_samples_transform, n_features)
Sample data
Returns
-------
Xt : sparse matrix of shape (n_samples_transform, n_samples_fit)
Xt[i, j] is assigned the weight of edge that connects i to j.
Only the neighbors have an explicit value.
The diagonal is always explicit.
The matrix is of CSR format.
"""
check_is_fitted(self)
return self.radius_neighbors_graph(X, mode=self.mode,
sort_results=True)
def fit_transform(self, X, y=None):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
y : ignored
Returns
-------
Xt : sparse matrix of shape (n_samples, n_samples)
Xt[i, j] is assigned the weight of edge that connects i to j.
Only the neighbors have an explicit value.
The diagonal is always explicit.
The matrix is of CSR format.
"""
return self.fit(X).transform(X)
def _more_tags(self):
return {
'_xfail_checks': {
'check_methods_sample_order_invariance':
'check is not applicable.'
}
}
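# Illustrative sketch (not part of this module): a quick look at the
# "one extra neighbor when mode='distance'" convention documented on
# KNeighborsTransformer above; helper only, the data and sizes are arbitrary
# assumptions for this example.
def _sketch_extra_neighbor_convention():
    import numpy as np
    X = np.random.RandomState(0).rand(20, 3)
    graph = KNeighborsTransformer(n_neighbors=5, mode='distance').fit_transform(X)
    # expect 6 stored entries per row: the sample itself (explicit zero on the
    # diagonal) plus its 5 nearest neighbors
    return graph.getnnz(axis=1)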
license: bsd-3-clause

gusmaogabriels/GPy | setup.py | copies: 4 | size: 2183
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
# Version number
version = '0.6.1'
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name = 'GPy',
version = version,
author = read('AUTHORS.txt'),
author_email = "[email protected]",
description = ("The Gaussian Process Toolbox"),
license = "BSD 3-clause",
keywords = "machine-learning gaussian-processes kernels",
url = "http://sheffieldml.github.com/GPy/",
packages = ["GPy.models",
"GPy.inference.optimization",
"GPy.inference.mcmc",
"GPy.inference",
"GPy.inference.latent_function_inference",
"GPy.likelihoods", "GPy.mappings",
"GPy.examples", "GPy.core.parameterization",
"GPy.core", "GPy.testing",
"GPy", "GPy.util", "GPy.kern",
"GPy.kern._src.psi_comp", "GPy.kern._src",
"GPy.plotting.matplot_dep.latent_space_visualizations.controllers",
"GPy.plotting.matplot_dep.latent_space_visualizations",
"GPy.plotting.matplot_dep", "GPy.plotting"],
package_dir={'GPy': 'GPy'},
package_data = {'GPy': ['defaults.cfg', 'installation.cfg',
'util/data_resources.json',
'util/football_teams.json']},
include_package_data = True,
py_modules = ['GPy.__init__'],
test_suite = 'GPy.testing',
long_description=read('README.md'),
install_requires=['numpy>=1.7', 'scipy>=0.12'],
extras_require = {'docs':['matplotlib >=1.3','Sphinx','IPython']},
classifiers=['License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence']
)
license: bsd-3-clause

siou83/trading-with-python | lib/functions.py | copies: 76 | size: 11627
# -*- coding: utf-8 -*-
"""
twp support functions
@author: Jev Kuznetsov
Licence: GPL v2
"""
from scipy import polyfit, polyval
import datetime as dt
#from datetime import datetime, date
from pandas import DataFrame, Index, Series
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def nans(shape, dtype=float):
''' create a nan numpy array '''
a = np.empty(shape, dtype)
a.fill(np.nan)
return a
def plotCorrelationMatrix(price, thresh = None):
''' plot a correlation matrix as a heatmap image
inputs:
price: prices DataFrame
thresh: correlation threshold to use for checking, default None
'''
symbols = price.columns.tolist()
R = price.pct_change()
correlationMatrix = R.corr()
if thresh is not None:
correlationMatrix = correlationMatrix > thresh
plt.imshow(abs(correlationMatrix.values),interpolation='none')
plt.xticks(range(len(symbols)),symbols)
plt.yticks(range(len(symbols)),symbols)
plt.colorbar()
plt.title('Correlation matrix')
return correlationMatrix
def pca(A):
""" performs principal components analysis
(PCA) on the n-by-p DataFrame A
Rows of A correspond to observations, columns to variables.
Returns :
coeff : principal components, column-wise
transform: A in principal component space
latent : eigenvalues
"""
# computing eigenvalues and eigenvectors of covariance matrix
M = (A - A.mean()).T # subtract the mean (along columns)
[latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted
idx = np.argsort(latent) # sort eigenvalues
idx = idx[::-1] # in descending order
coeff = coeff[:,idx]
latent = latent[idx]
score = np.dot(coeff.T,A.T) # projection of the data in the new space
transform = DataFrame(index = A.index, data = score.T)
return coeff,transform,latent
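# Illustrative sketch (not part of the original module): minimal use of pca()
# above on a small synthetic DataFrame, wrapped in a helper so nothing runs on
# import; the column names and sizes are assumptions for this example only.
def _demo_pca():
    rng = np.random.RandomState(0)
    A = DataFrame(rng.randn(100, 3), columns=['a', 'b', 'c'])
    coeff, transform, latent = pca(A)
    # coeff: 3x3 loadings, transform: 100x3 scores, latent: eigenvalues
    # returned in descending order by the sorting step above
    assert coeff.shape == (3, 3) and transform.shape == (100, 3)
    assert latent[0] >= latent[1] >= latent[2]
    return coeff, transform, latent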
def pos2pnl(price,position , ibTransactionCost=False ):
"""
calculate pnl based on price and position
Inputs:
---------
price: series or dataframe of price
position: number of shares at each time. Column names must be same as in price
ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share
Returns a portfolio DataFrame
"""
delta=position.diff()
port = DataFrame(index=price.index)
if isinstance(price,Series): # no need to sum along 1 for series
port['cash'] = (-delta*price).cumsum()
port['stock'] = (position*price)
else: # dealing with DataFrame here
port['cash'] = (-delta*price).sum(axis=1).cumsum()
port['stock'] = (position*price).sum(axis=1)
if ibTransactionCost:
tc = -0.005*position.diff().abs() # basic transaction cost
tc[(tc>-1) & (tc<0)] = -1 # everything under 1$ will be ceil'd to 1$
if isinstance(price,DataFrame):
tc = tc.sum(axis=1)
port['tc'] = tc.cumsum()
else:
port['tc'] = 0.
port['total'] = port['stock']+port['cash']+port['tc']
return port
def tradeBracket(price,entryBar,maxTradeLength,bracket):
'''
trade a symmetrical bracket on price series, return price delta and exit bar #
Input
------
price : series of price values
entryBar: entry bar number
maxTradeLength : max trade duration in bars
bracket : allowed price deviation
'''
lastBar = min(entryBar+maxTradeLength,len(price)-1)
p = price[entryBar:lastBar]-price[entryBar]
idxOutOfBound = np.nonzero(abs(p)>bracket) # find indices where price comes out of bracket
if idxOutOfBound[0].any(): # found match
priceDelta = p[idxOutOfBound[0][0]]
exitBar = idxOutOfBound[0][0]+entryBar
else: # all in bracket, exiting based on time
priceDelta = p[-1]
exitBar = lastBar
return priceDelta, exitBar
def estimateBeta(priceY,priceX,algo = 'standard'):
'''
estimate stock Y vs stock X beta using iterative linear
regression. Outliers outside 3 sigma boundary are filtered out
Parameters
--------
priceX : price series of x (usually market)
priceY : price series of y (estimate beta of this price)
Returns
--------
beta : stockY beta relative to stock X
'''
X = DataFrame({'x':priceX,'y':priceY})
if algo=='returns':
ret = (X/X.shift(1)-1).dropna().values
#print len(ret)
x = ret[:,0]
y = ret[:,1]
# filter high values
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
iteration = 1
nrOutliers = 1
while iteration < 10 and nrOutliers > 0 :
(a,b) = polyfit(x,y,1)
yf = polyval([a,b],x)
#plot(x,y,'x',x,yf,'r-')
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
elif algo=='log':
x = np.log(X['x'])
y = np.log(X['y'])
(a,b) = polyfit(x,y,1)
beta = a
elif algo=='standard':
ret =np.log(X).diff().dropna()
beta = ret['x'].cov(ret['y'])/ret['x'].var()
else:
raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'")
return beta
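# Illustrative sketch (not part of the original module): estimateBeta() above
# with the default 'standard' algorithm on synthetic prices built so that y
# moves with a beta of roughly 1.5 against x; helper only, nothing runs on
# import, and all numbers are assumptions for this example.
def _demo_estimateBeta():
    rng = np.random.RandomState(0)
    x_ret = rng.normal(0., 0.01, 500)
    y_ret = 1.5 * x_ret + rng.normal(0., 0.002, 500)
    priceX = Series(100 * np.exp(np.cumsum(x_ret)))
    priceY = Series(100 * np.exp(np.cumsum(y_ret)))
    return estimateBeta(priceY, priceX)  # cov/var of log returns, close to 1.5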
def estimateVolatility(ohlc, N=10, algo='YangZhang'):
"""
Volatility estimation
Possible algorithms: ['YangZhang', 'CC']
"""
cc = np.log(ohlc.close/ohlc.close.shift(1))
if algo == 'YangZhang': # Yang-zhang volatility
ho = np.log(ohlc.high/ohlc.open)
lo = np.log(ohlc.low/ohlc.open)
co = np.log(ohlc.close/ohlc.open)
oc = np.log(ohlc.open/ohlc.close.shift(1))
oc_sq = oc**2
cc_sq = cc**2
rs = ho*(ho-co)+lo*(lo-co)
close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0))
open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0))
window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0))
result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252)
result[:N-1] = np.nan
elif algo == 'CC': # standard close-close estimator
result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N))
else:
raise ValueError('Unknown algo type.')
return result*100
def rank(current,past):
''' calculate a relative rank 0..1 for a value against series '''
return (current>past).sum()/float(past.count())
def returns(df):
return (df/df.shift(1)-1)
def logReturns(df):
t = np.log(df)
return t-t.shift(1)
def dateTimeToDate(idx):
''' convert datetime index to date '''
dates = []
for dtm in idx:
dates.append(dtm.date())
return dates
def readBiggerScreener(fName):
''' import data from Bigger Capital screener '''
with open(fName,'rb') as f:
reader = csv.reader(f)
rows = [row for row in reader]
header = rows[0]
data = [[] for i in range(len(header))]
for row in rows[1:]:
for i,elm in enumerate(row):
try:
data[i].append(float(elm))
except Exception:
data[i].append(str(elm))
return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header]
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
def drawdown(s):
"""
calculate max drawdown and duration
Input:
s, price or cumulative pnl curve $
Returns:
drawdown : vector of drawdown values
duration : vector of drawdown duration
"""
# convert to array if got pandas series, 10x speedup
if isinstance(s,pd.Series):
idx = s.index
s = s.values
returnSeries = True
else:
returnSeries = False
if s.min() < 0: # offset if signal minimum is less than zero
s = s-s.min()
highwatermark = np.zeros(len(s))
drawdown = np.zeros(len(s))
drawdowndur = np.zeros(len(s))
for t in range(1,len(s)):
highwatermark[t] = max(highwatermark[t-1], s[t])
drawdown[t] = (highwatermark[t]-s[t])
drawdowndur[t]= (0 if drawdown[t] == 0 else drawdowndur[t-1]+1)
if returnSeries:
return pd.Series(index=idx,data=drawdown), pd.Series(index=idx,data=drawdowndur)
else:
return drawdown , drawdowndur
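# Illustrative sketch (not part of the original module): drawdown() above on a
# tiny hand-made equity curve, wrapped in a helper so nothing runs on import.
def _demo_drawdown():
    s = pd.Series([0., 2., 1., 3., 1., 1., 4.])
    dd, ddur = drawdown(s)
    # the dip after the high of 2 is 1 deep; the dip after the high of 3 is
    # 2 deep and lasts two bars before the new high resets both series
    assert dd.tolist() == [0., 0., 1., 0., 2., 2., 0.]
    assert ddur.tolist() == [0., 0., 1., 0., 1., 2., 0.]
    return dd, ddur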
def profitRatio(pnl):
'''
calculate profit ratio as sum(pnl)/drawdown
Input: pnl - daily pnl, Series or DataFrame
'''
def processVector(pnl): # process a single column
s = pnl.fillna(0)
dd = drawdown(s)[0]
p = s.sum()/dd.max()
return p
if isinstance(pnl,Series):
return processVector(pnl)
elif isinstance(pnl,DataFrame):
p = Series(index = pnl.columns)
for col in pnl.columns:
p[col] = processVector(pnl[col])
return p
else:
raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl)))
def candlestick(df,width=0.5, colorup='b', colordown='r'):
''' plot a candlestick chart of a dataframe '''
O = df['open'].values
H = df['high'].values
L = df['low'].values
C = df['close'].values
fig = plt.gcf()
ax = plt.axes()
#ax.hold(True)
X = df.index
#plot high and low
ax.bar(X,height=H-L,bottom=L,width=0.1,color='k')
idxUp = C>O
ax.bar(X[idxUp],height=(C-O)[idxUp],bottom=O[idxUp],width=width,color=colorup)
idxDown = C<=O
ax.bar(X[idxDown],height=(O-C)[idxDown],bottom=C[idxDown],width=width,color=colordown)
try:
fig.autofmt_xdate()
except Exception: # pragma: no cover
pass
ax.grid(True)
#ax.bar(x,height=H-L,bottom=L,width=0.01,color='k')
def datetime2matlab(t):
''' convert datetime timestamp to matlab numeric timestamp '''
mdn = t + dt.timedelta(days = 366)
frac = (t-dt.datetime(t.year,t.month,t.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
def getDataSources(fName = None):
''' return data sources directories for this machine.
directories are defined in datasources.ini or provided filepath'''
import socket
from ConfigParser import ConfigParser
pcName = socket.gethostname()
p = ConfigParser()
p.optionxform = str
if fName is None:
fName = 'datasources.ini'
p.read(fName)
if pcName not in p.sections():
raise NameError('Host name section %s not found in file %s' %(pcName,fName))
dataSources = {}
for option in p.options(pcName):
dataSources[option] = p.get(pcName,option)
return dataSources
if __name__ == '__main__':
df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]})
plt.clf()
candlestick(df)
license: bsd-3-clause

smartscheduling/scikit-learn-categorical-tree | sklearn/tests/test_naive_bayes.py | copies: 142 | size: 17496
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([0, 1, 1, 0, 0, 1])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
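# Illustrative sketch (not part of the original test module): where the
# unnorm_predict_proba numbers in test_bnb come from. With alpha=1.0 the
# Bernoulli model multiplies the class prior by p_f for each present feature
# and (1 - p_f) for each absent one; for the test document (Chinese, Japan,
# Tokyo present) this gives
#   China: 0.75 * (1-0.4) * 0.8 * 0.2 * (1-0.4) * (1-0.4) * 0.2 = 0.005184
#   Japan: 0.25 * (2/3)**6 = 16/729 ~= 0.0219478738
# and normalizing the pair reproduces clf.predict_proba(X_test).
def _sketch_bnb_arithmetic():
    china = 0.75 * (1 - 0.4) * 0.8 * 0.2 * (1 - 0.4) * (1 - 0.4) * 0.2
    japan = 0.25 * (2 / 3.0) ** 6
    return np.array([china, japan]) / (china + japan)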
license: bsd-3-clause

kingjr/jr-tools | jr/plot/base.py | copies: 1 | size: 13359
# Author: Jean-Remi King <[email protected]>
#
# License: Simplified BSD
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as col
from matplotlib.colors import LinearSegmentedColormap
from ..utils import logcenter
from ..stats import median_abs_deviation
RdPuBu = col.LinearSegmentedColormap.from_list('RdPuBu', ['b', 'r'])
def alpha_cmap(cmap='RdBu_r', slope=-10, thres=.5, diverge=True, shift=0):
if isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
if isinstance(cmap, LinearSegmentedColormap):
cmap._init()
cmap = cmap._lut[:cmap.N, :] * 255
if diverge:
logit = lambda x: \
np.abs(2 * (1 / (1 + np.exp(slope * (x + shift))) - .5)) - thres
else:
logit = lambda x: 1 / (1 + np.exp(slope * (x + shift))) - thres
logit2 = lambda x: logit(x) / logit(1.) * (logit(x) > 0)
cmap[:, -1] = [255 * logit2(ii)
for ii in np.linspace(-1.0, 1.0, cmap.shape[0])]
return cmap
def share_clim(axes, clim=None):
"""Share clim across multiple axes
Parameters
----------
axes : plt.axes
clim : np.array | list, shape(2,), optional
Defaults is min and max across axes.clim.
"""
# Find min max of clims
if clim is None:
clim = list()
for ax in axes:
for im in ax.get_images():
clim += np.array(im.get_clim()).flatten().tolist()
clim = [np.min(clim), np.max(clim)]
# apply common clim
for ax in axes:
for im in ax.get_images():
im.set_clim(clim)
plt.draw()
def plot_widths(xs, ys, widths, ax=None, color='b', xlim=None, ylim=None,
**kwargs):
xs, ys, widths = np.array(xs), np.array(ys), np.array(widths)
if not (len(xs) == len(ys) == len(widths)):
raise ValueError('xs, ys, and widths must have identical lengths')
fig = None
if ax is None:
fig, ax = plt.subplots(1)
segmentx, segmenty = [xs[0]], [ys[0]]
current_width = widths[0]
for ii, (x, y, width) in enumerate(zip(xs, ys, widths)):
segmentx.append(x)
segmenty.append(y)
if (width != current_width) or (ii == (len(xs) - 1)):
ax.plot(segmentx, segmenty, linewidth=current_width, color=color,
**kwargs)
segmentx, segmenty = [x], [y]
current_width = width
if xlim is None:
xlim = [min(xs), max(xs)]
if ylim is None:
ylim = [min(ys), max(ys)]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
return ax if fig is None else fig
def plot_sem(x, y, robust=False, **kwargs):
"""
Parameters
----------
x : list | np.array()
y : list | np.array()
robust : bool
If False use mean + std,
If True median + mad
ax
alpha
color
line_args
err_args
Returns
-------
ax
Adapted from http://tonysyu.github.io/plotting-error-bars.html#.VRE9msvmvEU
"""
x, y = np.array(x), np.array(y)
if robust:
m = np.nanmedian(y, axis=0)
std = median_abs_deviation(y, axis=0)
else:
m = np.nanmean(y, axis=0)
std = np.nanstd(y, axis=0)
n = y.shape[0] - np.sum(np.isnan(y), axis=0)
return plot_eb(x, m, std / np.sqrt(n), **kwargs)
def plot_eb(x, y, yerr, ax=None, alpha=0.3, color=None, line_args=dict(),
err_args=dict()):
"""
Parameters
----------
x : list | np.array()
y : list | np.array()
yerr : list | np.array() | float
ax
alpha
color
line_args
err_args
Returns
-------
ax
Adapted from http://tonysyu.github.io/plotting-error-bars.html#.VRE9msvmvEU
"""
x, y = np.array(x), np.array(y)
ax = ax if ax is not None else plt.gca()
if 'edgecolor' not in err_args.keys():
err_args['edgecolor'] = 'none'
if color is None:
color = ax._get_lines.color_cycle.next()
if np.isscalar(yerr) or len(yerr) == len(y):
ymin = y - yerr
ymax = y + yerr
elif len(yerr) == 2:
ymin, ymax = yerr
ax.plot(x, y, color=color, **line_args)
ax.fill_between(x, ymax, ymin, alpha=alpha, color=color, **err_args)
return ax
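# Illustrative sketch (not part of the original module): minimal use of
# plot_sem() above on synthetic "trials x time" data, wrapped in a helper so
# importing the module does not open a figure; sizes and names are assumptions
# for this example only.
def _demo_plot_sem():
    rng = np.random.RandomState(0)
    times = np.linspace(0., 1., 50)
    y = np.sin(2 * np.pi * times) + rng.randn(20, 50) * .5  # 20 noisy trials
    return plot_sem(times, y, color='k')  # mean line with a +/- SEM band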
def fill_betweenx_discontinuous(ax, ymin, ymax, x, freq=1, **kwargs):
"""Fill betwwen x even if x is discontinuous clusters
Parameters
----------
ax : axis
x : list
Returns
-------
ax : axis
"""
x = np.array(x)
min_gap = (1.1 / freq)
while np.any(x):
# If with single time point
if len(x) > 1:
xmax = np.where((x[1:] - x[:-1]) > min_gap)[0]
else:
xmax = [0]
# If continuous
if not np.any(xmax):
xmax = [len(x) - 1]
print(x[0], x[xmax[0]])
ax.fill_betweenx((ymin, ymax), x[0], x[xmax[0]], **kwargs)
# remove from list
x = x[(xmax[0] + 1):]
return ax
def pcolormesh_45deg(C, ax=None, xticks=None, xticklabels=None, yticks=None,
yticklabels=None, aspect='equal', rotation=45,
*args, **kwargs):
"""Adapted from http://stackoverflow.com/questions/12848581/
is-there-a-way-to-rotate-a-matplotlib-plot-by-45-degrees"""
import itertools
if ax is None:
ax = plt.gca()
n = C.shape[0]
# create rotation/scaling matrix
t = np.array([[1, .5], [-1, .5]])
# create coordinate matrix and transform it
product = itertools.product(range(n, -1, -1), range(0, n + 1, 1))
A = np.dot(np.array([(ii[1], ii[0]) for ii in product]), t)
# plot
ax.pcolormesh((2 * A[:, 1].reshape(n + 1, n + 1) - n),
A[:, 0].reshape(n + 1, n + 1),
np.flipud(C), *args, **kwargs)
xticks = np.linspace(0, n - 1, n, dtype=int) if xticks is None else xticks
yticks = np.linspace(0, n - 1, n, dtype=int) if yticks is None else yticks
if xticks is not None:
xticklabels = xticks if xticklabels is None else xticklabels
for tick, label, in zip(xticks, xticklabels):
print(tick, label)
ax.scatter(-n + tick + .5, tick + .5, marker='x', color='k')
ax.text(-n + tick + .5, tick + .5, label,
horizontalalignment='right', rotation=-rotation)
if yticks is not None:
yticklabels = yticks if yticklabels is None else yticklabels
for tick, label, in zip(yticks, yticklabels):
ax.scatter(tick + .5, n - tick - .5, marker='x', color='k')
ax.text(tick + .5, n - tick - .5, label,
horizontalalignment='left', rotation=rotation)
if aspect:
ax.set_aspect(aspect)
ax.set_xlim(-n, n)
ax.set_ylim(-n, n)
ax.plot([-n, 0, n, 0., -n], [0, n, 0, -n, 0], color='k')
ax.axis('off')
return ax
def pretty_plot(ax=None):
if ax is None:
ax = plt.gca()
ax.tick_params(colors='dimgray')
ax.xaxis.label.set_color('dimgray')
ax.yaxis.label.set_color('dimgray')
try:
ax.zaxis.label.set_color('dimgray')
except AttributeError:
pass
try:
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
except ValueError:
pass
ax.spines['left'].set_color('dimgray')
ax.spines['bottom'].set_color('dimgray')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
return ax
def pretty_colorbar(im=None, ax=None, ticks=None, ticklabels=None, nticks=3,
**kwargs):
if ax is None:
ax = plt.gca()
if im is None:
for obj in ax.get_children():
if isinstance(obj, matplotlib.image.AxesImage):
im = obj
continue
if im is None:
raise RuntimeError('did not find the image')
if ticks is None:
clim = im.get_clim()
# XXX bug https://github.com/matplotlib/matplotlib/issues/6352
if None in clim:
fig = ax.get_figure()
fig.canvas.draw()
clim = im.get_clim()
ticks = np.linspace(clim[0], clim[1], nticks)
cb = plt.colorbar(im, ax=ax, ticks=ticks, **kwargs)
if ticklabels is None:
ticklabels = ['%.2f' % ii for ii in ticks]
cb.ax.set_yticklabels(ticklabels, color='dimgray')
cb.ax.xaxis.label.set_color('dimgray')
cb.ax.yaxis.label.set_color('dimgray')
cb.ax.spines['left'].set_color('dimgray')
cb.ax.spines['right'].set_color('dimgray')
box = cb.ax.get_children()[2]
box.set_edgecolor('dimgray')
return cb
def get_datalim(ax):
"""WIP"""
X, Y = [np.inf, -np.inf], [np.inf, -np.inf]
for line in ax.lines:
if not line.get_visible():
continue
x, y = line.get_data()
X[0] = np.min(np.hstack((x, X[0])))
X[1] = np.max(np.hstack((x, X[1])))
Y[0] = np.min(np.hstack((y, Y[0])))
Y[1] = np.max(np.hstack((y, Y[1])))
    for patch in ax.patches:
        if not patch.get_visible():
            continue
        # patches expose their path in patch coordinates; map it to data
        # coordinates before taking the extrema
        xy = patch.get_patch_transform().transform(patch.get_path().vertices)
        x, y = xy[:, 0], xy[:, 1]
        X[0] = np.min(np.hstack((x, X[0])))
        X[1] = np.max(np.hstack((x, X[1])))
        Y[0] = np.min(np.hstack((y, Y[0])))
        Y[1] = np.max(np.hstack((y, Y[1])))
return X, Y
def share_lim(axes):
"""WIP"""
X, Y = [np.inf, -np.inf], [np.inf, -np.inf]
for ax in axes:
x, y = get_datalim(ax)
X[0] = np.min(np.hstack((x, X[0])))
X[1] = np.max(np.hstack((x, X[1])))
Y[0] = np.min(np.hstack((y, Y[0])))
Y[1] = np.max(np.hstack((y, Y[1])))
for ax in axes:
ax.set_xlim(X[0], X[1])
ax.set_ylim(Y[0], Y[1])
return X, Y
def bar_sem(x, y=None, color='k', ax=None, bin_width=None, bottom=None,
            alpha=.5):
if y is None:
y = np.array(x)
x = range(y.shape[1]) if y.ndim == 2 else range(len(y))
if ax is None:
ax = plt.gca()
y, x = np.array(y), np.array(x)
if (x.ndim > 1) or (x.shape[0] != y.shape[1]):
raise ValueError('x and y must share first axis')
if isinstance(color, str):
color = [color] * len(x)
elif isinstance(color, np.ndarray) and color.ndim == 1:
color = [color] * len(x)
means = np.nanmean(y, axis=0)
sems = np.nanstd(y, axis=0) / np.sqrt(y.shape[0])
if bin_width is None:
bin_width = np.diff(x[:2])
for mean, sem, bin_, this_color in zip(means, sems, x, color):
options = dict(color=this_color, edgecolor='none', linewidth=0,
width=bin_width, bottom=bottom)
        ax.bar(bin_, mean + sem, alpha=alpha, **options)
        ax.bar(bin_, mean - sem, alpha=alpha, **options)
ax.bar(bin_, mean, **options)
pretty_plot(ax)
return ax
class nonlinear_cmap(LinearSegmentedColormap):
def __init__(self, cmap, center=.5, clim=[0, 1]):
        if isinstance(cmap, str):
            self.cmap = plt.get_cmap(cmap)
        else:
            self.cmap = cmap
self.clim = clim
self.center = center
for attr in self.cmap.__dict__.keys():
setattr(self, attr, self.cmap.__dict__[attr])
def __call__(self, value, alpha=1., **kwargs):
center = (self.center - self.clim[0]) / np.diff(self.clim)
value = (value - self.clim[0]) / np.diff(self.clim)
value[value < 0] = 0.
value[value > 1] = 1.
ilogval = logcenter(center, x=value, inverse=True)
return self.cmap(ilogval, alpha=alpha, **kwargs)
def pretty_axes(axes, xticks=None, xticklabels=None, yticks=None,
yticklabels=None, xlabel=None, ylabel=None, xlabelpad=-10,
ylabelpad=-10, xlim=None, ylim=None, aspect=None):
ax0 = axes.reshape(-1)[0]
fig = ax0.get_figure()
fig.canvas.draw()
xticks = ax0.get_xticks() if xticks is None else xticks
xticklabels = [tick.get_text() for tick in ax0.get_xticklabels()] \
if xticklabels is None else xticklabels
xlabel = ax0.get_xlabel() if xlabel is None else xlabel
xlim = ax0.get_xlim() if xlim is None else xlim
yticks = ax0.get_yticks() if yticks is None else yticks
yticklabels = [tick.get_text() for tick in ax0.get_yticklabels()] \
if yticklabels is None else yticklabels
ylabel = ax0.get_ylabel() if ylabel is None else ylabel
ylim = ax0.get_ylim() if ylim is None else ylim
aspect = ax0.get_aspect() if aspect is None else aspect
if axes.ndim == 1:
axes = np.reshape(axes, [1, len(axes)])
n, m = axes.shape
for ii in range(n):
for jj in range(m):
pretty_plot(axes[ii, jj])
axes[ii, jj].set_xticks(xticks)
axes[ii, jj].set_yticks(yticks)
fig.canvas.draw()
axes[ii, jj].set_xlim(xlim)
axes[ii, jj].set_ylim(ylim)
fig.canvas.draw()
if ii != (n - 1):
axes[ii, jj].set_xticklabels([''] * len(xticks))
axes[ii, jj].set_xlabel('')
else:
axes[ii, jj].set_xticklabels(xticklabels)
axes[ii, jj].set_xlabel(xlabel, labelpad=xlabelpad)
if jj != 0:
axes[ii, jj].set_yticklabels([''] * len(yticks))
axes[ii, jj].set_ylabel('')
else:
axes[ii, jj].set_yticklabels(yticklabels)
axes[ii, jj].set_ylabel(ylabel, labelpad=ylabelpad)
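# Illustrative usage sketch (not part of the original module): pretty_plot on
# a simple line plot drops the top/right spines and greys out the rest.
def _pretty_plot_demo():
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot(np.arange(10), np.arange(10) ** 2, color='k')
    pretty_plot(ax)
    return fig, ax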
|
bsd-2-clause
|
flaviovdf/pyksc
|
src/scripts/plot_quality.py
|
1
|
4147
|
# -*- coding: utf8
from __future__ import division, print_function
from matplotlib import pyplot as plt
from pyksc import dist
from pyksc import metrics
from pyksc import ksc
from scripts import initialize_matplotlib
from vod.stats.ci import half_confidence_interval_size as hci
import argparse
import numpy as np
import os
import sys
import traceback
def run_clustering(X, k, dists_all):
cent, assign, shift, dists_cent = ksc.inc_ksc(X, k)
intra = metrics.avg_intra_dist(X, assign, dists_all)[0]
inter = metrics.avg_inter_dist(X, assign, dists_all)[0]
bcv = metrics.beta_cv(X, assign, dists_all)
cost = metrics.cost(X, assign, None, dists_cent)
return intra, inter, bcv, cost
def main(tseries_fpath, plot_foldpath):
assert os.path.isdir(plot_foldpath)
initialize_matplotlib()
X = np.genfromtxt(tseries_fpath)[:,1:].copy()
n_samples = X.shape[0]
sample_rows = np.arange(n_samples)
clust_range = range(2, 16)
n_clustering_vals = len(clust_range)
intra_array = np.zeros(shape=(25, n_clustering_vals))
inter_array = np.zeros(shape=(25, n_clustering_vals))
bcvs_array = np.zeros(shape=(25, n_clustering_vals))
costs_array = np.zeros(shape=(25, n_clustering_vals))
r = 0
    for i in range(5):
np.random.shuffle(sample_rows)
rand_sample = sample_rows[:200]
X_new = X[rand_sample]
D_new = dist.dist_all(X_new, X_new, rolling=True)[0]
        for j in range(5):
for k in clust_range:
intra, inter, bcv, cost = run_clustering(X_new, k, D_new)
intra_array[r, k - 2] = intra
inter_array[r, k - 2] = inter
bcvs_array[r, k - 2] = bcv
costs_array[r, k - 2] = cost
r += 1
print(r)
intra_err = np.zeros(n_clustering_vals)
inter_err = np.zeros(n_clustering_vals)
bcvs_err = np.zeros(n_clustering_vals)
costs_err = np.zeros(n_clustering_vals)
for k in clust_range:
j = k - 2
intra_err[j] = hci(intra_array[:,j], .95)
inter_err[j] = hci(inter_array[:,j], .95)
bcvs_err[j] = hci(bcvs_array[:,j], .95)
costs_err[j] = hci(costs_array[:,j], .95)
plt.errorbar(clust_range, np.mean(inter_array, axis=0), fmt='gD',
label='Inter Cluster', yerr=inter_err)
plt.errorbar(clust_range, np.mean(bcvs_array, axis=0), fmt='bo',
label='BetaCV', yerr=bcvs_err)
plt.errorbar(clust_range, np.mean(intra_array, axis=0), fmt='rs',
label='Intra Cluster', yerr=intra_err)
plt.ylabel('Average Distance')
plt.xlabel('Number of clusters')
plt.xlim((0., 16))
plt.ylim((0., 1.))
plt.legend(frameon=False, loc='lower left')
plt.savefig(os.path.join(plot_foldpath, 'bcv.pdf'))
plt.close()
plt.errorbar(clust_range, np.mean(costs_array, axis=0), fmt='bo',
label='Cost', yerr=costs_err)
plt.ylabel('Cost (F)')
plt.xlabel('Number of clusters')
plt.xlim((0., 16))
plt.ylim((0., 1.))
plt.legend(frameon=False, loc='lower left')
plt.savefig(os.path.join(plot_foldpath, 'cost.pdf'))
plt.close()
def create_parser(prog_name):
desc = __doc__
formatter = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(prog_name, description=desc,
formatter_class=formatter)
parser.add_argument('tseries_fpath', type=str, help='Time series file')
parser.add_argument('plot_foldpath', type=str, help='Folder to store plots')
return parser
def entry_point(args=None):
'''Fake main used to create argparse and call real one'''
if not args:
args = []
parser = create_parser(args[0])
values = parser.parse_args(args[1:])
try:
return main(values.tseries_fpath, values.plot_foldpath)
except:
traceback.print_exc()
parser.print_usage(file=sys.stderr)
return 1
if __name__ == '__main__':
sys.exit(entry_point(sys.argv))
|
bsd-3-clause
|
ThiagoGarciaAlves/intellij-community
|
python/helpers/pydev/pydev_ipython/qt_for_kernel.py
|
22
|
3574
|
""" Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt4 has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask ETS' QT_API env variable
if QT_API not set:
ask matplotlib via rcParams['backend.qt4']
if it said PyQt:
use PyQt4 @v1
elif it said PySide:
use PySide
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try:
PyQt @v1
except:
fallback on PySide
else:
use PyQt @v2 or PySide, depending on QT_API
because ETS doesn't work with PyQt @v1.
"""
import os
import sys
from pydev_ipython.version import check_version
from pydev_ipython.qt_loaders import (load_qt, QT_API_PYSIDE,
QT_API_PYQT, QT_API_PYQT_DEFAULT,
loaded_api, QT_API_PYQT5)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
if mpl is None:
return
# #PyDev-779: In pysrc/pydev_ipython/qt_for_kernel.py, matplotlib_options should be replaced with latest from ipython
# (i.e.: properly check backend to decide upon qt4/qt5).
backend = mpl.rcParams.get('backend', None)
if backend == 'Qt4Agg':
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
elif mpqt.lower() == 'pyqt4v2':
return [QT_API_PYQT]
raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
mpqt)
elif backend == 'Qt5Agg':
mpqt = mpl.rcParams.get('backend.qt5', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyqt5':
return [QT_API_PYQT5]
raise ImportError("unhandled value for backend.qt5 from matplotlib: %r" %
mpqt)
# Fallback without checking backend (previous code)
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
mpqt = mpl.rcParams.get('backend.qt5', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
elif mpqt.lower() == 'pyqt5':
return [QT_API_PYQT5]
raise ImportError("unhandled value for qt backend from matplotlib: %r" %
mpqt)
def get_options():
"""Return a list of acceptable QT APIs, in decreasing order of
preference
"""
#already imported Qt somewhere. Use that
loaded = loaded_api()
if loaded is not None:
return [loaded]
mpl = sys.modules.get('matplotlib', None)
if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
#1.0.1 only supports PyQt4 v1
return [QT_API_PYQT_DEFAULT]
if os.environ.get('QT_API', None) is None:
#no ETS variable. Ask mpl, then use either
return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE, QT_API_PYQT5]
#ETS variable present. Will fallback to external.qt
return None
api_opts = get_options()
if api_opts is not None:
QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
else: # use ETS variable
from pydev_ipython.qt import QtCore, QtGui, QtSvg, QT_API
|
apache-2.0
|
iismd17/scikit-learn
|
sklearn/linear_model/tests/test_perceptron.py
|
378
|
1815
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
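# Hedged illustration (not part of the original test file): the reference
# update rule above (w += y_i * x_i, b += y_i on a mistake) separates this
# linearly separable toy problem. The toy data is made up for illustration.
def _my_perceptron_toy_example():
    X_toy = np.array([[2., 0.], [-2., 0.]])
    y_toy = np.array([1, -1])
    clf = MyPerceptron(n_iter=5)
    clf.fit(X_toy, y_toy)
    assert (clf.predict(X_toy) == y_toy).all()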
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
|
bsd-3-clause
|
institution/fpp
|
main.py
|
1
|
9816
|
import sys
from parse_svg import accept_mm, accept_path, accept_viewBox, make_path
from path import distance, Vec, Bezier1, project, Poly, Line
from path import intersect_poly_poly, intersect_poly_line, flattern_bezier_list
import xml.etree.ElementTree as ET
from log import fail, info, warning
from reader import Reader
import math
VERSION = '0.4.0'
TOLERANCE_MM = 0.1
STEP_MM = 0.5
SHOW_GUI = 0
PRINT_OUTPUT = 0
LINE_THICKNESS_MM = 0.18
if SHOW_GUI:
import matplotlib.pyplot as plt
"""
Note on units: every variable stores value in [u] (unless postfix _mm), use mm_to and to_mm for input, output
"""
"""
TODO: top mesh (siatka)
TODO: print ruler with values to output ?
TODO: set STEP size show output ^
# TODO: add scale 1cm x 1cm box to output ?
# TODO: thinner line in output <- set to mm ?
# TODO: add cover start indicator?
# TODO: Alert on negative values on the profil -- check h value in calc_value
# TODO: think of some sanity check on output? ->
check last point == first point if applicable
rectangular test case
"""
#width="{width}"
#height="{height}"
OUTPUT_TEMPLATE = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
viewBox="{viewbox}"
id="svg2"
version="1.1">
<defs
id="defs4" />
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<path
id="{ident}"
d="{path}"
style="fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:{line_thickness_mm}mm;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;image-rendering:auto" />
</svg>
"""
def get_aabb(ps):
min_x = +2**30
min_y = +2**30
max_x = -2**30
max_y = -2**30
for x,y in ps:
if x > max_x:
max_x = x
if x < min_x:
min_x = x
if y > max_y:
max_y = y
if y < min_y:
min_y = y
return Vec(min_x,min_y),Vec(max_x,max_y)
def write_shape_to_svg(oname, ident, points, viewbox, to_mm):
"""
oname -- svg filename
points -- path points
viewbox -- (x,y,dx,dy)
to_mm -- conversion ratio
"""
x,y,dx,dy = viewbox
path = "M " + " ".join("{:.6f},{:.6f}".format(*p) for p in points)
with open(oname, 'wb') as f:
f.write(
OUTPUT_TEMPLATE.format(
path = path,
width = "{:.6f}mm".format(dx * to_mm),
height = "{:.6f}mm".format(dy * to_mm),
viewbox = "{:.6f} {:.6f} {:.6f} {:.6f}".format(*viewbox),
ident = ident,
line_thickness_mm = LINE_THICKNESS_MM,
).encode('utf-8')
)
info("written to {}".format(oname))
def save_top_svg(ps, oname, mar, to_mm):
assert len(ps) > 0
a,b = get_aabb(ps)
d = b - a
vb = (a[0]-mar,a[1]-mar,d[0]+2*mar,d[1]+2*mar)
write_shape_to_svg(oname = oname, ident='top', points = ps, viewbox = vb, to_mm = to_mm)
def save_side_svg(ps, oname, mar, to_mm):
assert len(ps) > 0
max_y = 0
for x,y in ps:
if y > max_y:
max_y = y
max_x = ps[-1][0]
min_x = ps[ 0][0]
w = max_x - min_x
h = max_y
ps.append((max_x,0))
ps.append((min_x,0))
ps.append(ps[0]) # close
vb = (min_x-mar,0-mar,w+2*mar,h+2*mar)
write_shape_to_svg(oname = oname, ident='side', points = ps, viewbox = vb, to_mm = to_mm)
def get_conversion_mm(vb, w_mm, h_mm):
"""
return -- to_mm, mm_to
"""
x0,y0,dx,dy = vb
to_mmx = w_mm/dx
to_mmy = h_mm/dy
assert math.isclose(to_mmx, to_mmy), (to_mmx, to_mmy)
to_mm = to_mmx
mm_to = dx/w_mm
return to_mm, mm_to
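# Hedged worked example (not part of the original source): a drawing declared
# as 100mm x 50mm with viewBox "0 0 200 100" has 0.5mm per user unit, so
# to_mm == 0.5 and mm_to == 2.0. The numbers are made up for illustration.
def _conversion_example():
    to_mm, mm_to = get_conversion_mm((0, 0, 200, 100), 100, 50)
    assert math.isclose(to_mm, 0.5) and math.isclose(mm_to, 2.0)
    return to_mm, mm_to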
def read_poly_from_svg_path(root, name, tolerance):
x = root.find(".//*[@id='"+name+"']")
if x != None:
beziers = make_path(accept_path(Reader(x.get('d'))))
err, vertices = flattern_bezier_list(beziers, tolerance, name)
return Poly(vertices)
else:
return None
def show(point_obrys, point_profil, value_mm):
vis1, = Bezier1(point_obrys, point_profil).render(plt)
vis2 = plt.text(
x=point_profil[0] + 15,
y=point_profil[1],
s="{:.1f}mm".format(value_mm),
verticalalignment='center',
backgroundcolor='white',
#bbox=dict(facecolor='white', alpha=0.8),
#size=12
)
plt.show()
plt.pause(0.001)
#plt.waitforbuttonpress(timeout=-1)
vis1.remove()
vis2.remove()
def calc_value(pos, obrys, profil, odcinek, to_mm):
##TODO: Calculate sidewall and cover point
##TODO: (height, dist) -- sidewall height, distance along obrys from start point
odcinek_dir = odcinek.get_dir()
ort_odcinek = Vec(odcinek_dir[1], -odcinek_dir[0])
point_obrys = obrys.get_point(pos)
point_odcinek, _ = project(point = point_obrys, line = odcinek)
orto_line = Line(point_odcinek, point_odcinek + ort_odcinek)
orto_unit = orto_line.get_length()
cover_x,cover_y = None, None
ths = intersect_poly_line(poly = profil, line = orto_line)
#print(profil.xs, orto_line.p0, orto_line.p1)
#print(ths)
if len(ths) == 1:
t,h = ths[0]
point_profil = profil.get_point(t)
            # TODO: assert that all h values share the same sign
cover_x = t
else:
fail('ERROR: unique intersection point of profil and orto_line is undefined')
_, cover_h = project(point = point_obrys, line = orto_line)
value = distance(point_odcinek, point_profil)
if SHOW_GUI:
show(point_obrys, point_profil, value * to_mm)
return value, Vec(cover_x, cover_h * orto_unit)
import os.path
def main():
info("FPP version: {}".format(VERSION))
if len(sys.argv) < 4:
info("usage: fpp <input.svg> <label1> <label2> [label3] ...")
sys.exit(0)
iname = sys.argv[1]
a = sys.argv[2]
for x in sys.argv[3:]:
b = x
main_segment(iname, a, b)
a = b
def main_segment(iname, start_label, end_label):
name = os.path.splitext(iname)[0]
info("opening: {!r}".format(iname))
tree = ET.parse(iname)
root = tree.getroot()
vb = accept_viewBox(Reader(root.get('viewBox')))
w_mm = accept_mm(Reader(root.get('width')))
h_mm = accept_mm(Reader(root.get('height')))
to_mm, mm_to = get_conversion_mm(vb, w_mm, h_mm)
info("width : {:.1f}mm".format(w_mm))
info("height: {:.1f}mm".format(h_mm))
#info("scale: 1mm is {:.3f}".format(1*mm_to))
#info("scale: 1 is {:.3f}mm".format(1*to_mm))
tolerance = TOLERANCE_MM * mm_to
profil = read_poly_from_svg_path(root, 'profil', tolerance)
if profil == None:
fail("ERROR: brak profilu na rysunku")
obrys = read_poly_from_svg_path(root, 'obrys', tolerance)
if obrys == None:
fail("ERROR: brak obrysu na rysunku")
info("obrys : length {:.1f}mm divided into {} segments".format(obrys.get_length()*to_mm, obrys.size()))
info("profil: length {:.1f}mm divided into {} segments".format(profil.get_length()*to_mm, profil.size()))
info("tolerance: {}mm".format(TOLERANCE_MM))
info("step size: {}mm".format(STEP_MM))
pos = 0.0
cross_poczatek = read_poly_from_svg_path(root, start_label, tolerance)
if cross_poczatek != None:
ths = intersect_poly_poly(obrys, cross_poczatek)
if len(ths) != 1:
fail("ERROR: start point not set")
else:
t,_ = ths[0]
pos = t
info("start: at {:.1f}mm".format(pos * to_mm))
else:
fail("ERROR: start point not set")
if end_label == start_label:
end = pos
info("end: at the beggining")
else:
cross_koniec = read_poly_from_svg_path(root, end_label, tolerance)
if cross_koniec != None:
ths = intersect_poly_poly(obrys, cross_koniec)
if len(ths) != 1:
info("end: present but not set")
else:
t,_ = ths[0]
end = t
info("end: at {:.1f}mm".format(end * to_mm))
else:
fail("ERROR: end point not set")
if pos < end:
delta = end - pos
else:
delta = obrys.get_length() - (pos - end)
assert delta > 0
assert delta <= obrys.get_length()
odcinek = Line(
profil.get_point(0),
profil.get_point(profil.get_length()),
)
# setup view
if SHOW_GUI:
plt.ion()
plt.show()
#plt.axis([vb[0], vb[0]+vb[2], vb[1], vb[1]+vb[3]])
profil.render(plt)
obrys.render(plt)
odcinek.render(plt)
if cross_poczatek:
cross_poczatek.render(plt)
if cross_koniec:
cross_koniec.render(plt)
rs = []
rs_cover = []
info("output length: {:.1f}mm".format(delta*to_mm))
info("running now...")
last_progress = 0
step = STEP_MM * mm_to
total = 0.0
while total < delta:
# print("pos,end = {:.1f},{:.1f}".format(pos*to_mm,end*to_mm))
# print("total,delta = {:.1f},{:.1f}".format(total*to_mm,delta*to_mm))
value, cover_p = calc_value(pos, obrys, profil, odcinek, to_mm)
if PRINT_OUTPUT:
print("OUTPUT: {:6.1f} {:6.1f} [mm] {:6.1f} {:6.1f} [u]".format(total*to_mm, value*to_mm, pos, value))
progress = int((total/delta) * 100)
if progress % 20 == 0 and progress != last_progress:
info("{:5}% done...".format(progress))
last_progress = progress
rs.append( (total, value) )
rs_cover.append( cover_p )
pos += step
if pos > obrys.get_length():
pos -= obrys.get_length()
total += step
# value at the end
total = delta
pos = end
value, cover_p = calc_value(pos, obrys, profil, odcinek, to_mm)
if PRINT_OUTPUT:
print("OUTPUT: {:6.1f} {:6.1f} [mm] {:6.1f} {:6.1f} [u]".format(total*to_mm, value*to_mm, pos, value))
rs.append((total, value))
rs_cover.append( cover_p )
info("{} points generated".format(len(rs)))
save_side_svg(rs, "{}-{}-{}-side.svg".format(name,start_label,end_label), 10*mm_to, to_mm)
save_top_svg(rs_cover, "{}-{}-{}-top.svg".format(name,start_label,end_label), 10*mm_to, to_mm)
if __name__ == '__main__':
main()
|
agpl-3.0
|
quaquel/EMAworkbench
|
ema_workbench/analysis/clusterer.py
|
1
|
2605
|
"""
This module provides time series clustering functionality using
complex invariant distance
"""
import itertools
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import scipy.cluster.hierarchy  # make sp.cluster.hierarchy available
import scipy.spatial.distance  # make sp.spatial.distance available
import sklearn.cluster as cluster
from ..util import get_module_logger
#
# Created on 11 Apr 2019
#
# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
#
__all__ = ['calculate_cid',
'plot_dendrogram',
'apply_agglomerative_clustering']
_logger = get_module_logger(__name__)
def CID(xi, xj, ce_i, ce_j):
return np.linalg.norm(xi - xj) * (max(ce_i, ce_j) / min(ce_i, ce_j))
def calculate_cid(data, condensed_form=False):
"""calculate the complex invariant distance between all rows
Parameters
----------
data : 2d ndarray
condensed_form : bool, optional
Returns
-------
distances
a 2D ndarray with the distances between all time series, or condensed
        form similar to scipy.spatial.distance.pdist
"""
ce = np.sqrt(np.sum(np.diff(data, axis=1) ** 2, axis=1))
indices = np.arange(0, data.shape[0])
cid = np.zeros((data.shape[0], data.shape[0]))
for i, j in itertools.combinations(indices, 2):
xi = data[i, :]
xj = data[j, :]
ce_i = ce[i]
ce_j = ce[j]
distance = CID(xi, xj, ce_i, ce_j)
cid[i, j] = distance
cid[j, i] = distance
if not condensed_form:
return cid
else:
return sp.spatial.distance.squareform(cid)
def plot_dendrogram(distances):
"""plot dendrogram for distances
"""
if distances.ndim == 2:
distances = sp.spatial.distance.squareform(distances)
linked = sp.cluster.hierarchy.linkage(distances) # @UndefinedVariable
fig = plt.figure()
sp.cluster.hierarchy.dendrogram(linked, # @UndefinedVariable
orientation='top',
distance_sort='descending',
show_leaf_counts=True)
return fig
def apply_agglomerative_clustering(distances, n_clusters, linkage='average'):
"""apply agglomerative clustering to the distances
Parameters
----------
distances : ndarray
n_clusters : int
linkage : {'average', 'complete', 'single'}
Returns
-------
1D ndarray with cluster assignment
"""
c = cluster.AgglomerativeClustering(n_clusters=n_clusters,
affinity='precomputed',
linkage=linkage)
clusters = c.fit_predict(distances)
return clusters
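# Hedged usage sketch (not part of the original module): pairwise CID
# distances for three short toy series, followed by a 2-cluster agglomerative
# assignment. The series are made up purely for illustration.
def _cid_clustering_example():
    data = np.array([[0., 1., 2., 3.],
                     [0., 1., 2., 4.],
                     [3., 2., 1., 0.]])
    distances = calculate_cid(data)
    assignment = apply_agglomerative_clustering(distances, n_clusters=2)
    return distances, assignment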
|
bsd-3-clause
|
xzh86/scikit-learn
|
sklearn/preprocessing/data.py
|
113
|
56747
|
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
            # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
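# Hedged usage sketch (not part of the original module): after `scale`, every
# column has (approximately) zero mean; constant columns end up at zero
# because their standard deviation is reset to 1. Toy data for illustration.
def _scale_example():
    X_demo = np.array([[1., 2.], [3., 2.], [5., 2.]])
    X_scaled = scale(X_demo)
    assert np.allclose(X_scaled.mean(axis=0), 0.0)
    return X_scaled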
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
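# Hedged usage sketch (not part of the original module): the formula in the
# class docstring maps every feature of this toy matrix onto [0, 1].
def _minmax_scaler_example():
    X_demo = np.array([[1., 10.], [2., 20.], [3., 30.]])
    X_scaled = MinMaxScaler(feature_range=(0, 1)).fit_transform(X_demo)
    assert np.allclose(X_scaled.min(axis=0), 0.0)
    assert np.allclose(X_scaled.max(axis=0), 1.0)
    return X_scaled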
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
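# Hedged usage sketch (not part of the original module): each column of this
# toy matrix is divided by its maximum absolute value (4.0 for both columns),
# so the result stays within [-1, 1] and zeros are preserved.
def _maxabs_scaler_example():
    X_demo = np.array([[1., -2.], [2., 4.], [-4., 1.]])
    scaler = MaxAbsScaler().fit(X_demo)
    assert np.allclose(scaler.scale_, [4., 4.])
    return scaler.transform(X_demo)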
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
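# Hedged usage sketch (not part of the original module): the median and IQR
# are barely affected by the gross outlier in this toy column, unlike a
# mean/std based scaler.
def _robust_scaler_example():
    X_demo = np.array([[1.], [2.], [3.], [4.], [100.]])
    scaler = RobustScaler().fit(X_demo)
    assert np.allclose(scaler.center_, 3.0)   # median
    assert np.allclose(scaler.scale_, 2.0)    # 75th - 25th percentile
    return scaler.transform(X_demo)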
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to interquartile range.
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
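# Hedged usage sketch (not part of the original module): after l2
# normalization every non-zero row has unit Euclidean length.
def _normalize_example():
    X_demo = np.array([[3., 4.], [1., 0.]])
    X_normed = normalize(X_demo, norm='l2')
    assert np.allclose(row_norms(X_normed), 1.0)
    return X_normed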
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
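# Hedged usage sketch (not part of the original module): values strictly above
# the threshold become 1, everything else becomes 0.
def _binarize_example():
    X_demo = np.array([[0.2, -1.0, 3.5]])
    X_bin = binarize(X_demo, threshold=0.5)
    assert X_bin.tolist() == [[0.0, 0.0, 1.0]]
    return X_bin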
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters); the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
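# A minimal usage sketch, assuming scikit-learn is importable: with the
# default threshold of 0.0, strictly positive values map to 1 and everything
# else maps to 0.
#
#   >>> from sklearn.preprocessing import Binarizer
#   >>> X = [[1., -1., 2.], [0., 0., -1.]]
#   >>> Binarizer().fit_transform(X)
#   array([[ 1.,  0.,  1.],
#          [ 0.,  0.,  0.]])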
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
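# An illustrative sketch, assuming numpy and scikit-learn are importable: for
# a linear kernel K = X X^T, centering K with KernelCenterer is equivalent to
# computing the kernel on mean-centered data.
#
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import KernelCenterer
#   >>> X = np.array([[1., 2.], [2., 0.], [0., 1.]])
#   >>> K = X.dot(X.T)
#   >>> Xc = X - X.mean(axis=0)
#   >>> np.allclose(KernelCenterer().fit_transform(K), Xc.dot(Xc.T))
#   True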
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
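# An additional illustration, assuming scipy is available: the same call works
# on sparse input and keeps the result sparse.
#
#   >>> from scipy import sparse
#   >>> add_dummy_feature(sparse.csr_matrix([[0., 1.], [1., 0.]])).toarray()
#   array([[ 1.,  0.,  1.],
#          [ 1.,  1.,  0.]])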
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected : "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
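# An illustrative sketch of the behaviour above: the transform is applied only
# to the selected columns, and the untouched columns are stacked to the right
# of the result.
#
#   >>> import numpy as np
#   >>> _transform_selected(np.array([[1., 10.], [2., 20.]]),
#   ...                     lambda X: X * 100, selected=[0])
#   array([[ 100.,   10.],
#          [ 200.,   20.]])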
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical feature values of X that were seen during
# fit, i.e. values less than n_values_, selected via the mask below.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
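# A short usage sketch of the handle_unknown parameter documented above: with
# handle_unknown='ignore', a category value unseen during fit is encoded as an
# all-zero block instead of raising an error.
#
#   enc = OneHotEncoder(handle_unknown='ignore')
#   enc.fit([[0], [1]])
#   enc.transform([[2]]).toarray()   # -> array([[ 0.,  0.]])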
|
bsd-3-clause
|
jadhavhninad/-CSE_515_MWD_Analytics-
|
Phase 2/DEMO/Phase 2 submissions/Phase 2 Submission/Code/MWDBProject/mwd_proj/mwd_proj/scripts_p2/Ninad/user_movie_matrix.py
|
2
|
7837
|
'''
Generating a user-genre matrix.
Genres are ranked for each user based on the
sum of (movie rating * year_wt) over the movies of each genre (assumption: movies watched recently reflect the latest preferences of a user).
Based on the ranking of genres,
recommend 5 unwatched movies in total, starting from the higher-ranked genres.
'''
from mysqlConn import DbConnect
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
import argparse
import operator
from math import log,exp
import pprint
#DB connector and cursor
db = DbConnect()
db_conn = db.get_connection()
cur2 = db_conn.cursor();
#Argument parser
parser = argparse.ArgumentParser()
parser.add_argument("USER")
args = parser.parse_args()
#==========================================================
#TASK - 1 : PRE - PROCESSING
#==========================================================
#SUB-TASK 1 - Clean the mlmovies table so that each row has a single genre.
#a. Create a new table mlmovies_clean with one entry per genre.
#b. For each entry in mlmovies, create an entry in mlmovies_clean for each of its genres.
cur2.execute("create table `mlmovies_clean`(movieid varchar(10) NOT NULL, moviename varchar(200) NOT NULL, year varchar(4) NOT NULL, genres varchar(200) NOT NULL)")
query1 = "SELECT * FROM `mlmovies`"
cur2.execute(query1)
result1 = cur2.fetchall()
row_count = 0
#For each movie, insert one row per genre into mlmovies_clean.
for entry in result1:
mvid = entry[0]
mvname = entry[1]
year = entry[2]
combo_genres = entry[3].split("|")
#Add new row for each genre.
for genre in combo_genres:
cur2.execute('INSERT INTO `mlmovies_clean`(movieid, moviename, year, genres) VALUES(%s, %s, %s, %s)', (mvid,mvname,year,genre))
row_count += 1
#Commit in batches of 1000 inserts to keep transactions small.
if row_count >= 1000:
db_conn.commit()
row_count = 0
db_conn.commit()
#----------------------------------------------------------------------
#====================================================================
#TASK - 2 : Weights of movies based on year using exponential decay
#====================================================================
#Get the max year.
cur2.execute("SELECT max(year) FROM mlmovies")
max_year = int(cur2.fetchone()[0])
#add a column year_weight in the table mlmovies.
cur2.execute("Alter table `mlmovies` add column year_wt FLOAT(15) NOT NULL")
cur2.execute("SELECT year FROM `mlmovies`")
result = cur2.fetchall()
# k = decay constant. k = 0.1 was chosen (after trying 1, 0.1, 0.01 and 0.001)
# so that the exponential weights stay within a reasonable range.
k=0.1
for movie_year in result:
current_year = int(movie_year[0])
diff = max_year - current_year
movie_wt = float(exp(-k*diff))
cur2.execute("UPDATE `mlmovies` set year_wt = %s where year = %s",(movie_wt,movie_year[0]))
db_conn.commit()
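# Illustration (comment only): with k = 0.1 the weight exp(-k * diff) decays
# smoothly with a movie's age relative to the newest year in the data,
# e.g. diff = 0 -> 1.00, diff = 5 -> ~0.61, diff = 10 -> ~0.37.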
#====================================================================
#Task-3 :Calculate the user_genre matrix
#====================================================================
dd_users_genre = {}
#Get all the users
#cur2.execute("SELECT userid FROM `mlusers` limit 500")
#Test all the 22000 users.
cur2.execute("SELECT userid FROM `mlusers`")
result0 = cur2.fetchall();
for usr in result0:
#print usr[0]
dd_users_genre[usr[0]] = {}
#Get all movies watched(and hence rated) by each user.
cur2.execute("SELECT movieid, rating FROM `mlratings` where userid = %s",usr)
result1 = cur2.fetchall()
for data1 in result1:
#print data1
user_movie_id = {data1[0],}
user_movie_rating = data1[1]
#For each movie, add its weight to every genre of that movie. In this way, genres are ranked from
#most watched to least watched for a user. Recently rated or tagged movies contribute more because
#their year_wt is higher, but a low rating pulls the genre down since we multiply year_wt by the rating.
cur2.execute("SELECT genres FROM `mlmovies_clean` where movieid = %s", user_movie_id)
result2 = cur2.fetchall()
for vals in result2:
#print vals
cur2.execute("SELECT year_wt FROM `mlmovies` where genres = %s", vals)
mv_weight = cur2.fetchone()[0]
if vals[0] in dd_users_genre[usr[0]]:
dd_users_genre[usr[0]][vals[0]] += (mv_weight * int(user_movie_rating))
else:
dd_users_genre[usr[0]][vals[0]] = mv_weight * int(user_movie_rating)
#We need to do the same for mltags, which has no rating column, so we assign a default (average)
# rating of 2 below.
# Get all movies tagged by each user. If a movie is only tagged and not rated, give it a rating of 2 (avg).
cur2.execute("SELECT movieid FROM `mltags` where userid = %s", usr)
result2 = cur2.fetchall()
for data in result2:
#print data1
user_movie_id = {data[0],}
cur2.execute("SELECT genres FROM `mlmovies_clean` where movieid = %s", user_movie_id)
result2 = cur2.fetchall()
for vals in result2:
cur2.execute("SELECT year_wt FROM `mlmovies` where genres = %s", vals)
mv_weight = cur2.fetchone()[0]
if vals[0] in dd_users_genre[usr[0]]:
dd_users_genre[usr[0]][vals[0]] += (mv_weight * 2)
else:
dd_users_genre[usr[0]][vals[0]] = mv_weight * 2
#Give every genre the user has not interacted with a very low weight.
cur2.execute("SELECT DISTINCT genres FROM `mlmovies_clean`")
genreNames = cur2.fetchall()
for keyval in genreNames:
key = keyval[0]
#print key
if key in dd_users_genre[usr[0]]:
continue
else:
dd_users_genre[usr[0]][key] = 0.000001
#pprint.pprint(dd_users_genre)
usr_genre_matrix = pd.DataFrame(dd_users_genre)
#usr_genre_matrix = usr_genre_matrix.T
#pprint.pprint(usr_genre_matrix)
usr_genre_matrix.to_csv("out.csv", sep='\t')
#========================================================================================
#Task:4 - Recommend top 5 unwatched movies starting from the best ranked genre for a user
#=========================================================================================
#This data can also be precomputed and stored
userWatchedMovies = []
cur2.execute("SELECT movieid FROM `mlratings` where userid = %s",[args.USER])
result0 = cur2.fetchall()
for data in result0:
userWatchedMovies.append(data[0])
cur2.execute("SELECT movieid FROM `mltags` where userid = %s",[args.USER])
result0 = cur2.fetchall()
for data in result0:
userWatchedMovies.append(data[0])
print "-----Watched movies-------"
for watched_ids in userWatchedMovies:
cur2.execute("SELECT moviename,genres FROM `mlmovies` where movieid = %s", {watched_ids, })
print cur2.fetchone()
user_genres_vals = sorted(list([usr_genre_matrix[args.USER]]),key=operator.itemgetter(1), reverse=True)
#print user_genres_vals
print "----------------------------------------------------------------------"
print "-------Genre weights based in user rating of movie and movie year-----"
sorted_x = sorted(dd_users_genre[args.USER].items(), key=operator.itemgetter(1), reverse=True)
pprint.pprint(sorted_x)
count=0
recommend=[]
print "-------------------------------"
print "-------Recommended movies------"
for keys in sorted_x:
#print keys[0]
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s", {keys[0],})
result0 = cur2.fetchall()
for data in result0:
if data[0] in userWatchedMovies:
continue
else:
recommend.append(data[0])
count+=1
if count==5 : break
if count == 5: break
for rec_ids in recommend:
#print rec_ids
cur2.execute("SELECT moviename,genres FROM `mlmovies` where movieid = %s", {rec_ids, })
print cur2.fetchone()
|
gpl-3.0
|
yunfeilu/scikit-learn
|
examples/bicluster/plot_spectral_biclustering.py
|
403
|
2011
|
"""
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
|
bsd-3-clause
|
andyraib/data-storage
|
python_scripts/env/lib/python3.6/site-packages/pandas/io/tests/test_pickle.py
|
7
|
10831
|
# pylint: disable=E1101,E1103,W0232
""" manage legacy pickle tests """
import nose
import os
from distutils.version import LooseVersion
import pandas as pd
from pandas import Index
from pandas.compat import u, is_platform_little_endian
import pandas
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, MonthEnd
class TestPickle():
"""
How to add pickle tests:
1. Install pandas version intended to output the pickle.
2. Execute "generate_legacy_storage_files.py" to create the pickle.
$ python generate_legacy_storage_files.py <output_dir> pickle
3. Move the created pickle to "data/legacy_pickle/<version>" directory.
NOTE: TestPickle can't be a subclass of tm.TestCase to use test generator.
http://stackoverflow.com/questions/6689537/
nose-test-generators-inside-class
"""
_multiprocess_can_split_ = True
def setUp(self):
from pandas.io.tests.generate_legacy_storage_files import (
create_pickle_data)
self.data = create_pickle_data()
self.path = u('__%s__.pickle' % tm.rands(10))
def compare_element(self, result, expected, typ, version=None):
if isinstance(expected, Index):
tm.assert_index_equal(expected, result)
return
if typ.startswith('sp_'):
comparator = getattr(tm, "assert_%s_equal" % typ)
comparator(result, expected, exact_indices=False)
elif typ == 'timestamp':
if expected is pd.NaT:
assert result is pd.NaT
else:
tm.assert_equal(result, expected)
tm.assert_equal(result.freq, expected.freq)
else:
comparator = getattr(tm, "assert_%s_equal" %
typ, tm.assert_almost_equal)
comparator(result, expected)
def compare(self, vf, version):
# py3 compat when reading py2 pickle
try:
data = pandas.read_pickle(vf)
except (ValueError) as e:
if 'unsupported pickle protocol:' in str(e):
# trying to read a py3 pickle in py2
return
else:
raise
for typ, dv in data.items():
for dt, result in dv.items():
try:
expected = self.data[typ][dt]
except (KeyError):
if version in ('0.10.1', '0.11.0') and dt == 'reg':
break
else:
raise
# use a specific comparator
# if available
comparator = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
comparator = getattr(self, comparator, self.compare_element)
comparator(result, expected, typ, version)
return data
def compare_sp_series_ts(self, res, exp, typ, version):
# SparseTimeSeries integrated into SparseSeries in 0.12.0
# and deprecated in 0.17.0
if version and LooseVersion(version) <= "0.12.0":
tm.assert_sp_series_equal(res, exp, check_series_type=False)
else:
tm.assert_sp_series_equal(res, exp)
def compare_series_ts(self, result, expected, typ, version):
# GH 7748
tm.assert_series_equal(result, expected)
tm.assert_equal(result.index.freq, expected.index.freq)
tm.assert_equal(result.index.freq.normalize, False)
tm.assert_series_equal(result > 0, expected > 0)
# GH 9291
freq = result.index.freq
tm.assert_equal(freq + Day(1), Day(2))
res = freq + pandas.Timedelta(hours=1)
tm.assert_equal(isinstance(res, pandas.Timedelta), True)
tm.assert_equal(res, pandas.Timedelta(days=1, hours=1))
res = freq + pandas.Timedelta(nanoseconds=1)
tm.assert_equal(isinstance(res, pandas.Timedelta), True)
tm.assert_equal(res, pandas.Timedelta(days=1, nanoseconds=1))
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
def compare_series_cat(self, result, expected, typ, version):
# Categorical dtype is added in 0.15.0
# ordered is changed in 0.16.0
if LooseVersion(version) < '0.15.0':
tm.assert_series_equal(result, expected, check_dtype=False,
check_categorical=False)
elif LooseVersion(version) < '0.16.0':
tm.assert_series_equal(result, expected, check_categorical=False)
else:
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def compare_frame_cat_onecol(self, result, expected, typ, version):
# Categorical dtype is added in 0.15.0
# ordered is changed in 0.16.0
if LooseVersion(version) < '0.15.0':
tm.assert_frame_equal(result, expected, check_dtype=False,
check_categorical=False)
elif LooseVersion(version) < '0.16.0':
tm.assert_frame_equal(result, expected, check_categorical=False)
else:
tm.assert_frame_equal(result, expected)
def compare_frame_cat_and_float(self, result, expected, typ, version):
self.compare_frame_cat_onecol(result, expected, typ, version)
def compare_index_period(self, result, expected, typ, version):
tm.assert_index_equal(result, expected)
tm.assertIsInstance(result.freq, MonthEnd)
tm.assert_equal(result.freq, MonthEnd())
tm.assert_equal(result.freqstr, 'M')
tm.assert_index_equal(result.shift(2), expected.shift(2))
def compare_sp_frame_float(self, result, expected, typ, version):
if LooseVersion(version) <= '0.18.1':
tm.assert_sp_frame_equal(result, expected, exact_indices=False,
check_dtype=False)
else:
tm.assert_sp_frame_equal(result, expected)
def read_pickles(self, version):
if not is_platform_little_endian():
raise nose.SkipTest("known failure on non-little endian")
pth = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
n = 0
for f in os.listdir(pth):
vf = os.path.join(pth, f)
data = self.compare(vf, version)
if data is None:
continue
n += 1
assert n > 0, 'Pickle files are not tested'
def test_pickles(self):
pickle_path = tm.get_data_path('legacy_pickle')
n = 0
for v in os.listdir(pickle_path):
pth = os.path.join(pickle_path, v)
if os.path.isdir(pth):
yield self.read_pickles, v
n += 1
assert n > 0, 'Pickle files are not tested'
def test_round_trip_current(self):
try:
import cPickle as c_pickle
def c_pickler(obj, path):
with open(path, 'wb') as fh:
c_pickle.dump(obj, fh, protocol=-1)
def c_unpickler(path):
with open(path, 'rb') as fh:
fh.seek(0)
return c_pickle.load(fh)
except:
c_pickler = None
c_unpickler = None
import pickle as python_pickle
def python_pickler(obj, path):
with open(path, 'wb') as fh:
python_pickle.dump(obj, fh, protocol=-1)
def python_unpickler(path):
with open(path, 'rb') as fh:
fh.seek(0)
return python_pickle.load(fh)
for typ, dv in self.data.items():
for dt, expected in dv.items():
for writer in [pd.to_pickle, c_pickler, python_pickler]:
if writer is None:
continue
with tm.ensure_clean(self.path) as path:
# test writing with each pickler
writer(expected, path)
# test reading with each unpickler
result = pd.read_pickle(path)
self.compare_element(result, expected, typ)
if c_unpickler is not None:
result = c_unpickler(path)
self.compare_element(result, expected, typ)
result = python_unpickler(path)
self.compare_element(result, expected, typ)
def test_pickle_v0_14_1(self):
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_14_1.pickle')
# This code was executed once on v0.14.1 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
tm.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_pickle_v0_15_2(self):
# ordered -> _ordered
# GH 9347
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
tm.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
|
apache-2.0
|
google-research/google-research
|
graph_embedding/dmon/train_dgi_batched.py
|
1
|
5453
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TODO(tsitsulin): add headers, tests, and improve style."""
from absl import app
from absl import flags
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import normalized_mutual_info_score
import tensorflow.compat.v2 as tf
from graph_embedding.dmon.layers.gcn import GCN
from graph_embedding.dmon.models.dgi import deep_graph_infomax
from graph_embedding.dmon.synthetic_data.graph_util import construct_knn_graph
from graph_embedding.dmon.synthetic_data.overlapping_gaussians import line_gaussians
from graph_embedding.dmon.utilities.batching import make_batch
from graph_embedding.dmon.utilities.batching import random_batch
from graph_embedding.dmon.utilities.shuffling import shuffle_inbatch
tf.compat.v1.enable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_integer(
'n_nodes', 1000, 'Number of nodes for the synthetic graph.', lower_bound=0)
flags.DEFINE_integer(
'n_clusters',
2,
'Number of clusters for the synthetic graph.',
lower_bound=0)
flags.DEFINE_integer(
'batch_size', 16, 'Batch size to use for training.', lower_bound=0)
flags.DEFINE_float(
'train_size', 0.2, 'Training data proportion.', lower_bound=0)
flags.DEFINE_integer(
'n_epochs', 200, 'Number of epochs to train.', lower_bound=0)
flags.DEFINE_float(
'learning_rate', 0.01, 'Optimizer\'s learning rate.', lower_bound=0)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
print('Bröther may i have some self-lööps')
n_nodes = FLAGS.n_nodes
n_clusters = FLAGS.n_clusters
train_size = FLAGS.train_size
batch_size = FLAGS.batch_size
data_clean, data_dirty, labels = line_gaussians(n_nodes, n_clusters)
graph_clean = construct_knn_graph(data_clean)
n_neighbors = [15, 10] # TODO(tsitsulin): move to FLAGS.
total_matrix_size = 1 + np.cumprod(n_neighbors).sum()
train_mask = np.zeros(n_nodes, dtype=np.bool)
train_mask[np.random.choice(
np.arange(n_nodes), int(n_nodes * train_size), replace=False)] = True
test_mask = ~train_mask
print(
f'Data shape: {data_clean.shape}, graph shape: {graph_clean.shape}'
)
print(f'Train size: {train_mask.sum()}, test size: {test_mask.sum()}')
input_features = tf.keras.layers.Input(shape=(
total_matrix_size,
2,
))
input_features_corrupted = tf.keras.layers.Input(
shape=(
total_matrix_size,
2,
))
input_graph = tf.keras.layers.Input((
total_matrix_size,
total_matrix_size,
))
encoder = [GCN(64), GCN(32), tf.keras.layers.Lambda(lambda x: x[0][:, 0, :])]
model = deep_graph_infomax(
[input_features, input_features_corrupted, input_graph], encoder)
def loss(model, x, y, training):
_, y_ = model(x, training=training)
return loss_object(y_true=y, y_pred=y_)
def grad(model, inputs, targets):
with tf.GradientTape() as tape:
loss_value = loss(model, inputs, targets, training=True)
for loss_internal in model.losses:
loss_value += loss_internal
return loss_value, tape.gradient(loss_value, model.trainable_variables)
labels_dgi = tf.concat([tf.zeros([batch_size, 1]),
tf.ones([batch_size, 1])], 0)
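# The discriminator scores 2 * batch_size representations per step, one block
# built from the clean features and one from the corrupted ones, hence a block
# of zeros followed by a block of ones as binary targets; the exact block
# ordering here is assumed to match how deep_graph_infomax arranges its outputs.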
loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
optimizer = tf.keras.optimizers.Adam(FLAGS.learning_rate)
for epoch in range(FLAGS.n_epochs):
subgraph_mat, features_mat, _, nonzero_indices = random_batch(
graph_clean, data_dirty, batch_size, n_neighbors)
perc_shuffle = 1 # np.linspace(1, 0.25, max_epoch)[epoch]
features_corrupted = shuffle_inbatch(features_mat, nonzero_indices,
perc_shuffle)
loss_value, grads = grad(model,
[features_mat, features_corrupted, subgraph_mat],
labels_dgi)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
print(
f'epoch {epoch}, loss: {loss_value.numpy():.4f}, shuffle %: {100*perc_shuffle:.2f}'
)
subgraph_mat, features_mat, _ = make_batch(graph_clean, data_dirty,
np.arange(n_nodes), n_neighbors)
representations, _ = model([features_mat, features_mat, subgraph_mat],
training=False)
representations = representations.numpy()
clf = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf.fit(representations[train_mask], labels[train_mask])
clusters = clf.predict(representations[test_mask])
print(
'NMI:',
normalized_mutual_info_score(
labels[test_mask], clusters, average_method='arithmetic'))
print('Accuracy:', 100 * accuracy_score(labels[test_mask], clusters))
if __name__ == '__main__':
app.run(main)
|
apache-2.0
|
frank-tancf/scikit-learn
|
sklearn/cross_validation.py
|
5
|
67523
|
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
from .gaussian_process.kernels import Kernel as GPKernel
from .exceptions import FitFailedWarning
warnings.warn("This module has been deprecated in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. Also note that the interface of the "
"new CV iterators are different from that of this module. "
"This module will be removed in 0.20.", DeprecationWarning)
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
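# Illustrative sketch: a custom iterator only needs to subclass
# _PartitionIterator and implement _iter_test_indices; the base class then
# yields (train, test) index pairs with complementary train indices. The
# FirstKOut class below is hypothetical, not part of this module's API.
#
#   class FirstKOut(_PartitionIterator):
#       """Use the first k samples as the single test set."""
#       def __init__(self, n, k):
#           super(FirstKOut, self).__init__(n)
#           self.k = k
#       def _iter_test_indices(self):
#           yield np.arange(self.k)
#
#   # list(FirstKOut(5, 2)) -> [(array([2, 3, 4]), array([0, 1]))]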
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used as a validation set once while the k - 1 remaining
fold(s) form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
.. versionadded:: 0.17
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3):
super(LabelKFold, self).__init__(len(labels), n_folds,
shuffle=False, random_state=None)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
self.idxs = label_to_fold[labels]
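# Worked illustration (comment only): with labels [0, 0, 2, 2, 3] and
# n_folds=2 the per-label sample counts are [2, 2, 1]; the two largest labels
# are assigned to separate empty folds and the remaining singleton joins
# whichever fold is currently lighter, giving fold sizes of 3 and 2 samples --
# approximately balanced by sample count.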
def _iter_test_indices(self):
for f in range(self.n_folds):
yield np.where(self.idxs == f)[0]
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
All the folds have size trunc(n_samples / n_folds); the last one has the
complementary size.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if np.all(self.n_folds > label_counts):
raise ValueError("All the n_labels for individual classes"
" are less than %d folds."
% (self.n_folds))
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
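# A small worked example of the rule above (illustrative only, not part of the
# original module): for n=10 samples with test_size=0.25 and train_size=None,
# n_test = ceil(0.25 * 10) = 3 and n_train = 10 - 3 = 7, so
# _validate_shuffle_split(10, 0.25, None) returns (7, 3).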
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
            # Because of rounding issues (n_train and n_test are not exact
            # multiples of the per-class counts), we may end up here with
            # fewer samples in train and test than asked for.
if len(train) + len(test) < self.n_train + self.n_test:
                # Complete the split by randomly assigning the missing indices
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
n_missing_train = self.n_train - len(train)
n_missing_test = self.n_test - len(test)
if n_missing_train > 0:
train.extend(missing_idx[:n_missing_train])
if n_missing_test > 0:
test.extend(missing_idx[-n_missing_test:])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
"""Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
.. versionadded:: 0.17
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling and splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
"""
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
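# A minimal usage sketch for LabelShuffleSplit (illustrative values, not part
# of the original module). Whole label groups are shuffled into either the
# train or the test side, so no label ever appears on both sides of a split:
#
#   >>> labels = [1, 1, 2, 2, 3, 3, 4, 4]
#   >>> lss = LabelShuffleSplit(labels, n_iter=4, test_size=0.5, random_state=0)
#   >>> for train_idx, test_idx in lss:
#   ...     pass  # each index array covers complete label groups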
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
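# A minimal usage sketch (illustrative, not part of the original module):
# cross_val_predict returns exactly one out-of-fold prediction per sample,
# so the result can be compared element-wise against ``y``.
#
#   >>> from sklearn.datasets import load_iris
#   >>> from sklearn.linear_model import LogisticRegression
#   >>> iris = load_iris()
#   >>> y_pred = cross_val_predict(LogisticRegression(), iris.data,
#   ...                            iris.target, cv=5)
#   >>> y_pred.shape
#   (150,)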
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
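# For example (illustrative): _check_is_partition(np.array([2, 0, 1]), 3) is
# True because sorted([2, 0, 1]) equals range(3), whereas
# _check_is_partition(np.array([0, 0, 1]), 3) is False since index 2 is missing.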
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
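# A minimal usage sketch (illustrative, not part of the original module):
#
#   >>> from sklearn.datasets import load_iris
#   >>> from sklearn.svm import SVC
#   >>> iris = load_iris()
#   >>> scores = cross_val_score(SVC(), iris.data, iris.target, cv=5)
#   >>> scores.shape
#   (5,)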
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel) \
and not isinstance(estimator.kernel, GPKernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
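# Illustrative note (not part of the original module): for a precomputed square
# kernel matrix K, the test block handed to the estimator is
# K[test_indices, train_indices], i.e. similarities of the test samples to the
# training samples, which is what the np.ix_ branch above builds:
#
#   >>> K = np.arange(16).reshape(4, 4)
#   >>> K[np.ix_([0, 1], [2, 3])]
#   array([[2, 3],
#          [6, 7]])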
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is binary or
multiclass, :class:`StratifiedKFold` is used. In all other cases,
:class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
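# A minimal usage sketch (illustrative, not part of the original module): an
# integer ``cv`` is expanded to StratifiedKFold for classification targets and
# to KFold otherwise.
#
#   >>> X = np.zeros((6, 2))
#   >>> y = np.array([0, 1, 0, 1, 0, 1])
#   >>> isinstance(check_cv(3, X, y, classifier=True), StratifiedKFold)
#   True
#   >>> isinstance(check_cv(3, X, y, classifier=False), KFold)
#   True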
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
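# A minimal usage sketch (illustrative, not part of the original module):
#
#   >>> from sklearn.datasets import load_iris
#   >>> from sklearn.svm import SVC
#   >>> iris = load_iris()
#   >>> score, perm_scores, pvalue = permutation_test_score(
#   ...     SVC(), iris.data, iris.target, cv=5, n_permutations=30,
#   ...     random_state=0)
#   >>> perm_scores.shape
#   (30,)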
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
.. versionadded:: 0.16
preserves input type instead of always casting to numpy array.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
.. versionadded:: 0.17
*stratify* splitting
Returns
-------
splitting : list, length = 2 * len(arrays),
List containing train-test split of inputs.
.. versionadded:: 0.16
Output type is the same as the input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
|
bsd-3-clause
|
mahak/spark
|
python/pyspark/pandas/tests/test_numpy_compat.py
|
15
|
8672
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.pandas import set_option, reset_option
from pyspark.pandas.numpy_compat import unary_np_spark_mappings, binary_np_spark_mappings
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
class NumPyCompatTest(PandasOnSparkTestCase, SQLTestUtils):
blacklist = [
# Koalas does not currently support
"conj",
"conjugate",
"isnat",
"matmul",
"frexp",
# Values are close enough but tests failed.
"arccos",
"exp",
"expm1",
"log", # flaky
"log10", # flaky
"log1p", # flaky
"modf",
"floor_divide", # flaky
# Results seem inconsistent in a different version of, I (Hyukjin) suspect, PyArrow.
# From PyArrow 0.15, seems it returns the correct results via PySpark. Probably we
# can enable it later when Koalas switches to PyArrow 0.15 completely.
"left_shift",
]
@property
def pdf(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
)
@property
def psdf(self):
return ps.from_pandas(self.pdf)
def test_np_add_series(self):
psdf = self.psdf
pdf = self.pdf
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
self.assert_eq(np.add(psdf.a, psdf.b), np.add(pdf.a, pdf.b).rename())
else:
self.assert_eq(np.add(psdf.a, psdf.b), np.add(pdf.a, pdf.b))
psdf = self.psdf
pdf = self.pdf
self.assert_eq(np.add(psdf.a, 1), np.add(pdf.a, 1))
def test_np_add_index(self):
k_index = self.psdf.index
p_index = self.pdf.index
self.assert_eq(np.add(k_index, k_index), np.add(p_index, p_index))
def test_np_unsupported_series(self):
psdf = self.psdf
with self.assertRaisesRegex(NotImplementedError, "pandas.*not.*support.*sqrt.*"):
np.sqrt(psdf.a, psdf.b)
def test_np_unsupported_frame(self):
psdf = self.psdf
with self.assertRaisesRegex(NotImplementedError, "on-Spark.*not.*support.*sqrt.*"):
np.sqrt(psdf, psdf)
def test_np_spark_compat_series(self):
        # Use a randomly generated DataFrame
pdf = pd.DataFrame(
np.random.randint(-100, 100, size=(np.random.randint(100), 2)), columns=["a", "b"]
)
pdf2 = pd.DataFrame(
np.random.randint(-100, 100, size=(len(pdf), len(pdf.columns))), columns=["a", "b"]
)
psdf = ps.from_pandas(pdf)
psdf2 = ps.from_pandas(pdf2)
for np_name, spark_func in unary_np_spark_mappings.items():
np_func = getattr(np, np_name)
if np_name not in self.blacklist:
try:
# unary ufunc
self.assert_eq(np_func(pdf.a), np_func(psdf.a), almost=True)
except Exception as e:
raise AssertionError("Test in '%s' function was failed." % np_name) from e
for np_name, spark_func in binary_np_spark_mappings.items():
np_func = getattr(np, np_name)
if np_name not in self.blacklist:
try:
# binary ufunc
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
self.assert_eq(
np_func(pdf.a, pdf.b).rename(), np_func(psdf.a, psdf.b), almost=True
)
else:
self.assert_eq(np_func(pdf.a, pdf.b), np_func(psdf.a, psdf.b), almost=True)
self.assert_eq(np_func(pdf.a, 1), np_func(psdf.a, 1), almost=True)
except Exception as e:
raise AssertionError("Test in '%s' function was failed." % np_name) from e
# Test only top 5 for now. 'compute.ops_on_diff_frames' option increases too much time.
try:
set_option("compute.ops_on_diff_frames", True)
for np_name, spark_func in list(binary_np_spark_mappings.items())[:5]:
np_func = getattr(np, np_name)
if np_name not in self.blacklist:
try:
# binary ufunc
if LooseVersion(pd.__version__) < LooseVersion("0.25"):
self.assert_eq(
np_func(pdf.a, pdf2.b).sort_index().rename(),
np_func(psdf.a, psdf2.b).sort_index(),
almost=True,
)
else:
self.assert_eq(
np_func(pdf.a, pdf2.b).sort_index(),
np_func(psdf.a, psdf2.b).sort_index(),
almost=True,
)
except Exception as e:
raise AssertionError("Test in '%s' function was failed." % np_name) from e
finally:
reset_option("compute.ops_on_diff_frames")
def test_np_spark_compat_frame(self):
        # Use a randomly generated DataFrame
pdf = pd.DataFrame(
np.random.randint(-100, 100, size=(np.random.randint(100), 2)), columns=["a", "b"]
)
pdf2 = pd.DataFrame(
np.random.randint(-100, 100, size=(len(pdf), len(pdf.columns))), columns=["a", "b"]
)
psdf = ps.from_pandas(pdf)
psdf2 = ps.from_pandas(pdf2)
for np_name, spark_func in unary_np_spark_mappings.items():
np_func = getattr(np, np_name)
if np_name not in self.blacklist:
try:
# unary ufunc
self.assert_eq(np_func(pdf), np_func(psdf), almost=True)
except Exception as e:
raise AssertionError("Test in '%s' function was failed." % np_name) from e
for np_name, spark_func in binary_np_spark_mappings.items():
np_func = getattr(np, np_name)
if np_name not in self.blacklist:
try:
# binary ufunc
self.assert_eq(np_func(pdf, pdf), np_func(psdf, psdf), almost=True)
self.assert_eq(np_func(pdf, 1), np_func(psdf, 1), almost=True)
except Exception as e:
raise AssertionError("Test in '%s' function was failed." % np_name) from e
# Test only top 5 for now. 'compute.ops_on_diff_frames' option increases too much time.
try:
set_option("compute.ops_on_diff_frames", True)
for np_name, spark_func in list(binary_np_spark_mappings.items())[:5]:
np_func = getattr(np, np_name)
if np_name not in self.blacklist:
try:
# binary ufunc
self.assert_eq(
np_func(pdf, pdf2).sort_index(),
np_func(psdf, psdf2).sort_index(),
almost=True,
)
except Exception as e:
raise AssertionError("Test in '%s' function was failed." % np_name) from e
finally:
reset_option("compute.ops_on_diff_frames")
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_numpy_compat import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
nelson-liu/scikit-learn
|
examples/applications/plot_prediction_latency.py
|
85
|
11395
|
"""
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn.utils import shuffle
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[[i], :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
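# For instance (illustrative, not part of the original example), after fitting
# any regressor one could call
#     atomic, bulk = benchmark_estimator(fitted_regressor, X_test, n_bulk_repeats=30)
# where both returned arrays hold per-prediction runtimes in seconds.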
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
random_seed = 13
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=n_train, random_state=random_seed)
X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)
X_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
X_test = X_scaler.transform(X_test)
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]
y_test = y_scaler.transform(y_test[:, None])[:, 0]
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
    pred_type : 'bulk' or 'atomic'
    configuration : dict describing the benchmark; its 'estimators' entry
        supplies the names and complexities shown as boxplot labels
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
    Returns
    -------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[[0]])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
|
bsd-3-clause
|
sonnyhu/scikit-learn
|
sklearn/svm/tests/test_bounds.py
|
9
|
2471
|
import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_raise_message
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
# loss='l2' should raise ValueError
assert_raise_message(ValueError, "loss type not in",
l1_min_c, dense_X, Y1, "l2")
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
|
bsd-3-clause
|
deeplook/bokeh
|
examples/glyphs/trail.py
|
10
|
4257
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from math import sin, cos, atan2, sqrt, radians
import numpy as np
import scipy.ndimage as im
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.resources import INLINE
from bokeh.browserlib import view
from bokeh.models.glyphs import Line, Patches
from bokeh.models.widgets import VBox
from bokeh.models import (
Plot, GMapPlot, GMapOptions,
DataRange1d, ColumnDataSource,
LinearAxis, Grid,
PanTool, WheelZoomTool, ResetTool)
from bokeh.sampledata.mtb import obiszow_mtb_xcm
def haversin(theta):
return sin(0.5 * theta) ** 2
def distance(p1, p2):
"""Distance between (lat1, lon1) and (lat2, lon2). """
R = 6371
lat1, lon1 = p1
lat2, lon2 = p2
phi1 = radians(lat1)
phi2 = radians(lat2)
delta_lat = radians(lat2 - lat1)
delta_lon = radians(lon2 - lon1)
a = haversin(delta_lat) + cos(phi1) * cos(phi2) * haversin(delta_lon)
return 2 * R * atan2(sqrt(a), sqrt(1 - a))
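# Sanity check (illustrative, not part of the original script): with R = 6371 km,
# one degree of latitude comes out close to 111 km, e.g.
# distance((50.0, 17.0), (51.0, 17.0)) is roughly 111.2, and
# distance(p, p) returns 0.0 for any point p.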
def prep_data(dataset):
df = dataset.copy()
latlon = list(zip(df.lat, df.lon))
dist = np.array([distance(latlon[i + 1], latlon[i]) for i in range(len((latlon[:-1])))])
df["dist"] = np.concatenate(([0], np.cumsum(dist)))
slope = np.abs(100 * np.diff(df.alt) / (1000 * dist))
slope[np.where( slope < 4) ] = 0 # "green"
slope[np.where((slope >= 4) & (slope < 6))] = 1 # "yellow"
slope[np.where((slope >= 6) & (slope < 10))] = 2 # "pink"
slope[np.where((slope >= 10) & (slope < 15))] = 3 # "orange"
slope[np.where( slope >= 15 )] = 4 # "red"
slope = im.median_filter(slope, 6)
colors = np.empty_like(slope, dtype=object)
colors[np.where(slope == 0)] = "green"
colors[np.where(slope == 1)] = "yellow"
colors[np.where(slope == 2)] = "pink"
colors[np.where(slope == 3)] = "orange"
colors[np.where(slope == 4)] = "red"
df["colors"] = list(colors) + [None] # NOTE: add [None] just make pandas happy
return df
title = "Obiszów MTB XCM"
def trail_map(data):
lon = (min(data.lon) + max(data.lon)) / 2
lat = (min(data.lat) + max(data.lat)) / 2
map_options = GMapOptions(lng=lon, lat=lat, zoom=13)
plot = GMapPlot(title="%s - Trail Map" % title, map_options=map_options, plot_width=800, plot_height=800)
plot.x_range = DataRange1d()
plot.y_range = DataRange1d()
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
line_source = ColumnDataSource(dict(x=data.lon, y=data.lat, dist=data.dist))
line = Line(x="x", y="y", line_color="blue", line_width=2)
plot.add_glyph(line_source, line)
return plot
def altitude_profile(data):
plot = Plot(title="%s - Altitude Profile" % title, plot_width=800, plot_height=400)
plot.x_range = DataRange1d()
plot.y_range = DataRange1d()
xaxis = LinearAxis(axis_label="Distance (km)")
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis(axis_label="Altitude (m)")
plot.add_layout(yaxis, 'left')
xgrid = Grid(plot=plot, dimension=0, ticker=xaxis.ticker)
ygrid = Grid(plot=plot, dimension=1, ticker=yaxis.ticker)
plot.renderers.extend([xgrid, ygrid])
plot.add_tools(PanTool(), WheelZoomTool(), ResetTool())
X, Y = data.dist, data.alt
y0 = min(Y)
patches_source = ColumnDataSource(dict(
xs=[[X[i], X[i+1], X[i+1], X[i]] for i in range(len(X[:-1])) ],
ys=[[y0, y0, Y[i+1], Y[i]] for i in range(len(Y[:-1])) ],
color=data.colors[:-1]
))
patches = Patches(xs="xs", ys="ys", fill_color="color", line_color="color")
plot.add_glyph(patches_source, patches)
line_source = ColumnDataSource(dict(x=data.dist, y=data.alt))
line = Line(x='x', y='y', line_color="black", line_width=1)
plot.add_glyph(line_source, line)
return plot
data = prep_data(obiszow_mtb_xcm)
trail = trail_map(data)
altitude = altitude_profile(data)
layout = VBox(children=[altitude, trail])
doc = Document()
doc.add(layout)
if __name__ == "__main__":
filename = "trail.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Trail map and altitude profile"))
print("Wrote %s" % filename)
view(filename)
|
bsd-3-clause
|
maxentile/advanced-ml-project
|
scripts/flow_map.py
|
1
|
6301
|
import numpy as np
import pylab as pl
pl.rcParams['font.family']='Serif'
import networkx as nx
from sklearn import neighbors
from sklearn.neighbors import KernelDensity
from sklearn.cluster import AgglomerativeClustering
from scipy.spatial import distance
from sklearn.base import BaseEstimator,TransformerMixin
class FlowMap(BaseEstimator,TransformerMixin):
def __init__(self,target_clust_num=200,
min_edges=2,
max_edges=20,
r_percentile=1.0,
density_estimator='k',
k=10,r=0.1):
if density_estimator=='k':
self.density_estimator=lambda X: self.local_density_k(X,k)
else:
            if density_estimator not in ('r', 'k'):
print('density_estimator not understood: defaulting to radial')
self.density_estimator=lambda X: self.local_density_r(X,r)
self.k=k
self.r=r
self.target_clust_num=target_clust_num
self.min_edges=min_edges
self.max_edges=max_edges
self.r_percentile=r_percentile
self.accept_prob_func=self.compute_accept_prob
def local_density_k(self,X,k=10,metric=None):
        if metric is not None:
            bt = neighbors.BallTree(X,200,metric=metric)
            neighbor_graph = neighbors.kneighbors_graph(bt,k,'distance')
        else:
            neighbor_graph = neighbors.kneighbors_graph(X,k,'distance')
distances = np.array(neighbor_graph.mean(1))[:,0]
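        # A smaller mean distance to the k nearest neighbours means a denser region;
        # invert and rescale so the returned density lies in [0, 1].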
return 1-((distances - distances.min())/(distances.max() - distances.min()))
def local_density_r(self,X,r=0.1,metric=None):
        if metric is not None:
bt = neighbors.BallTree(X,200,metric=metric)
neighbor_graph = neighbors.radius_neighbors_graph(bt,r)
else:
neighbor_graph = neighbors.radius_neighbors_graph(X,r)
counts = np.array(neighbor_graph.sum(1))[:,0]
return ((counts - counts.min())/(counts.max() - counts.min()))
def compute_accept_prob(self,densities,
outlier_density_percentile=1.0,
target_density_percentile=3.0):
''' densities is a vector of densities '''
OD = np.percentile(densities,outlier_density_percentile)
TD = np.percentile(densities,target_density_percentile)
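        # OD: points below this outlier-density threshold are always rejected;
        # TD: points above this target density are thinned with probability TD / local_density.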
accept_prob = np.zeros(len(densities))
for i,LD in enumerate(densities):
if LD < OD:
accept_prob[i] = 0
elif LD > OD and LD <= TD:
accept_prob[i] = 1
elif LD > TD:
accept_prob[i] = TD/LD
return accept_prob
def accept_according_to_probs(self,accept_prob):
''' just output indices'''
return accept_prob > np.random.rand(len(accept_prob))
# compute cluster centers given cluster assignments
def compute_cluster_centers(self,X,C):
centers = np.zeros((len(set(C)),len(X.T)))
for i in set(C):
points = X[C==i]
centers[i] = np.mean(points,0)
return centers
def num_edges(self,densities,min_edges=2,max_edges=20):
''' pass in an array of densities'''
assert(len(densities)>1)
min_density = np.min(densities)
max_density = np.max(densities)
lambdas = densities / (max_density - min_density)
return np.array(min_edges + lambdas*(max_edges - min_edges),dtype=int)
def construct_graph(self,centers,num_edges_array):
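        # Connect each cluster centre to a node-specific number of nearest neighbours
        # (num_edges_array, derived from local density), with edge weights equal to distances.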
max_edges = np.max(num_edges_array)
G = nx.Graph()
nn = neighbors.NearestNeighbors(max_edges+1)
nn.fit(centers)
for i in range(len(centers)):
dist,neigh = nn.kneighbors(centers[i],num_edges_array[i]+1)
dist = dist[0]
neigh = neigh[0]
for j in range(1,len(dist)):
G.add_edge(i,neigh[j],weight=dist[j])
return G
def fit_transform(self,X):
# to-do: need to implement edge pruning using time-data, as in paper
# namely, edges can only be drawn between points in the same timepoint or
# adjacent timepoints
# Density-dependent downsampling
est_density = self.density_estimator(X)
accept_prob = self.accept_prob_func(est_density)
accept_ind = self.accept_according_to_probs(accept_prob)
downsampled = X[accept_ind]
# Clustering
cluster_model = AgglomerativeClustering(self.target_clust_num)
C = cluster_model.fit_predict(downsampled)
# Graph construction over cluster centers
centers = self.compute_cluster_centers(downsampled,C)
pdist = distance.pdist(centers)
#distmat = distance.squareform(pdist)
r = np.percentile(pdist,self.r_percentile)
#adj = (distmat < r)
num_neighbors = self.local_density_r(centers,self.r)
#pl.hist(num_neighbors,bins=len(set(num_neighbors)));
#sorted_clust_id = sorted(range(len(num_neighbors)),key=lambda i:num_neighbors[i])
num_edges_array = self.num_edges(num_neighbors,self.min_edges,self.max_edges)
G = self.construct_graph(centers,num_edges_array)
#w = 1/(distmat)
#w[w==np.inf]=0
#weighted_adj_mat = w*adj
# Rendering
pos = nx.graphviz_layout(G)
        positions = np.array([pos[v] for v in G.nodes()])
        pl.scatter(positions[:,0],positions[:,1])
        nx.draw_networkx_edges(G,pos=pos)
pl.show()
return positions
def main():
def generate_blobs(num_samples=5000,separation=8):
centers = np.array([[0,0],[1,0],[0,1],[1,1]],dtype=float)
centers -= 0.5
centers = np.vstack((centers,#centers*2,centers*3,
#centers*4,centers*5,centers*6,
#centers*7,centers*8,centers*9,
#centers*10,centers*11,centers*12,
#centers*13,centers*14,
[0,0]))
centers *= separation
kde = KernelDensity()
kde.fit(centers)
samples = kde.sample(num_samples)
density = kde.score_samples(samples)
return samples,density
samples,density = generate_blobs(1000,10)
f = FlowMap()
pos = f.fit_transform(samples)
if __name__ == '__main__':
main()
|
mit
|
rohit21122012/DCASE2013
|
runs/2016/dnn2016med_lfcc/combine_results.py
|
6
|
3064
|
import csv
import math
import os
import numpy as np
import tensorflow as tf
from sklearn import preprocessing as pp
def get_results(file_name):
    # type: (string) -> list of rows (each row is a list of str)
results = []
if os.path.isfile(file_name):
with open(file_name, 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
results.append(row)
else:
raise IOError("Result file not found [%s]" % file_name)
return results
def get_results_all_fold(path):
tot_res = []
for fold in xrange(1, 5):
        tot_res += get_results('result_fold' + str(fold) + '.csv')
return tot_res
# Read First Results
mfcc_results = get_results_all_fold('mfcc_res.csv')
# Read Second Results
lfcc_results = get_results_all_fold('lfcc_res.csv')
# Read Third Results
antimfcc_results = get_results_all_fold('antimfcc_res.csv')
# The result CSVs are read as strings; convert to arrays so columns can be sliced.
mfcc_results = np.array(mfcc_results)
lfcc_results = np.array(lfcc_results)
antimfcc_results = np.array(antimfcc_results)
x_v = mfcc_results[:, 3].astype(float)
y_v = lfcc_results[:, 3].astype(float)
z_v = antimfcc_results[:, 3].astype(float)
res_v = mfcc_results[:, 0]
lb = pp.LabelBinarizer()
res_v = lb.fit_transform(res_v)
x = tf.placeholder(tf.float32, shape=[None, 15])
y = tf.placeholder(tf.float32, shape=[None, 15])
z = tf.placeholder(tf.float32, shape=[None, 15])
w = tf.Variable([0.3, 0.3, 0.3], name="w")
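# The mixing weights are re-parameterised through exp(), so the combination below is a
# softmax-weighted average: the effective weights are positive and sum to one.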
res_model = (x * tf.exp(w[0]) + y * tf.exp(w[1]) + z * tf.exp(w[2])) / (tf.exp(w[0]) + tf.exp(w[1]) + tf.exp(w[2]))
res = tf.placeholder(tf.float32, shape=[None, 15])
error = tf.reduce_sum(tf.square(res - res_model))
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(error)
model = tf.initialize_all_variables()
# x_v = np.random.rand(15 * 78, 15)
# y_v = np.random.rand(15 * 78, 15)
# z_v = np.zeros((15 * 78, 15))
# res_v = np.random.rand(15 * 78, 15)
tot_error_v = 0.0
with tf.Session() as session:
session.run(model)
for i in range(len(x_v) / 30):
x_batch = x_v[30 * i:30 * i + 29]
y_batch = y_v[30 * i:30 * i + 29]
z_batch = z_v[30 * i:30 * i + 29]
res_batch = res_v[30 * i:30 * i + 29]
_, error_v = session.run([train_op, error], feed_dict={x: x_batch,
y: y_batch,
z: z_batch,
res: res_batch})
tot_error_v += error_v
w_v = session.run(w)
sum_w_v = math.exp(w_v[0]) + math.exp(w_v[1]) + math.exp(w_v[2])
print("Model: {a:.3f}x + {b:.3f}y + {c:.3f}z".format(a=math.exp(w_v[0]) / sum_w_v, b=math.exp(w_v[1]) / sum_w_v,
c=math.exp(w_v[2]) / sum_w_v))
print("Error: {e:.3f}".format(e=tot_error_v))
min_error_v = 1000000000.0
mi = 1.0
mj = 1.0
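# Exhaustive search over the discretised 2-simplex (step 0.01) as a sanity check on the
# gradient-descent solution above; the third weight is implicitly 1 - i - j.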
for i in np.arange(0, 1, 0.01):
for j in np.arange(0, 1 - i, 0.01):
error_v = np.sum((i * x_v + j * y_v + (1 - i - j) * z_v - res_v) ** 2)
if error_v < min_error_v:
min_error_v = error_v
mi = i
mj = j
print("Model: {a:.3f}x + {b:.3f}y + {c:.3f}z".format(a=mi, b=mj, c=1 - mi - mj))
print("Error: {e:.3f}".format(e=min_error_v))
|
mit
|
paris-saclay-cds/ramp-workflow
|
rampwf/utils/pretty_print.py
|
1
|
2805
|
# coding: utf-8
"""
Utility methods to print the results in a terminal using term colors
"""
import os
import platform
from pandas import option_context
from ..externals.colored import stylize, fg, attr
IS_WINDOWS = platform.system() == "Windows"
# known terminal types which can handle colors on any system
COLOR_TERMS = ['xterm-256color', 'cygwin', 'xterm-color']
# 'xterm' can handle color on macos but not on windows
IS_COLOR_TERM = 'TERM' in os.environ and (
os.environ['TERM'] in COLOR_TERMS or (
os.environ['TERM'] == 'xterm' and not IS_WINDOWS
)
)
# Dictionary of term colors used for printing to terminal
fg_colors = {
'official_train': 'light_green',
'official_valid': 'light_blue',
'official_test': 'red',
'train': 'dark_sea_green_3b',
'valid': 'light_slate_blue',
'test': 'pink_1',
'title': 'gold_3b',
'warning': 'grey_46',
}
def print_title(title):
if IS_COLOR_TERM:
title = stylize(title, fg(fg_colors['title']) + attr('bold'))
print(title)
def print_warning(warning):
if IS_COLOR_TERM:
warning = stylize(warning, fg(fg_colors['warning']))
print(warning)
def print_df_scores(df_scores, indent=''):
"""Pretty print the scores dataframe.
Parameters
----------
df_scores : pd.DataFrame
the score dataframe
indent : str, default=''
indentation if needed
"""
with option_context("display.width", None):
df_repr = repr(df_scores)
df_repr_out = []
for line, color_key in zip(df_repr.splitlines(),
[None, None] +
list(df_scores.index.values)):
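            # The first two lines of the DataFrame repr are header rows, so they are
            # paired with None above and styled as the title; data rows are keyed by
            # their index value to pick a color.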
if line.strip() == 'step':
continue
if color_key is None:
# table header
if IS_COLOR_TERM:
line = stylize(line, fg(fg_colors['title']) + attr('bold'))
if color_key is not None:
tokens = line.split()
tokens_bak = tokens[:]
if 'official_' + color_key in fg_colors:
# line label and official score bold & bright
if IS_COLOR_TERM:
label_color = fg(fg_colors['official_' + color_key])
tokens[0] = stylize(tokens[0], label_color + attr('bold'))
tokens[1] = stylize(tokens[1], label_color + attr('bold'))
if IS_COLOR_TERM and (color_key in fg_colors):
# other scores pale
tokens[2:] = [stylize(token, fg(fg_colors[color_key]))
for token in tokens[2:]]
for token_from, token_to in zip(tokens_bak, tokens):
line = line.replace(token_from, token_to)
line = indent + line
df_repr_out.append(line)
print('\n'.join(df_repr_out))
|
bsd-3-clause
|
JanetMatsen/Machine_Learning_CSE_546
|
HW3/code/not_updated/ridge_regression.py
|
2
|
12001
|
import numpy as np
import pandas as pd
import scipy.sparse as sp
import scipy.sparse.linalg as splin
import time
from classification_base import ClassificationBase
class RidgeMulti(ClassificationBase):
"""
Train multiple ridge models.
"""
def __init__(self, X, y, lam, W=None, verbose=False, sparse=True,
test_X=None, test_y = None, kernelized=False):
"""
test_X, test_y are for compatibility only, because the questions for
other methods require knowing test data during fitting.
"""
super(RidgeMulti, self).__init__(X=X, y=y, W=W, sparse=sparse)
self.sparse = sparse
if self.sparse:
assert lam != 0, "can't invert the big stuff with lambda = 0."
self.X = sp.csc_matrix(self.X)
self.Y = sp.csc_matrix(self.Y)
self.lam = lam
self.W = None # don't want to have W before solving!
self.matrix_work = None
self.verbose = verbose
self.kernelized = kernelized
def get_weights(self):
if self.sparse:
return self.W.toarray()
if not self.sparse:
return self.W
def apply_weights(self):
if self.verbose:
print("Apply weights to H(X): {}".format(time.asctime(time.localtime(time.time()))))
# Apply weights
if self.sparse:
assert type(self.W) == sp.csc_matrix or type(self.W) == sp.csr_matrix, \
"type of W is {}".format(type(self.W))
assert type(self.X) == sp.csc_matrix, \
"type of W is {}".format(type(self.X))
prod = self.X.dot(self.W)
if self.verbose:
print("Done applying weights to H(X): {}".format(time.asctime(time.localtime(time.time()))))
if type(prod) == sp.csc_matrix:
return prod.toarray()
else:
return prod
else:
if self.verbose:
print("Done applying weights to H(X): {}".format(time.asctime(time.localtime(time.time()))))
return self.X.dot(self.W)
def optimize(self):
# When solving multiclass, (X^TX + lambdaI)-1X^T is shared
# solve it once and share it with all the regressors.
# find lambda*I_D + X^T*X
if self.verbose: print("optimize: multiply matrices before inversion.")
# Get (X^TX + lambdaI)
if self.sparse:
piece_to_invert = sp.csc_matrix(sp.identity(self.d)*self.lam) + \
self.X.T.dot(self.X)
else:
piece_to_invert = np.identity(self.d)*self.lam + self.X.T.dot(self.X)
assert piece_to_invert.shape == (self.d, self.d)
# Invert (X^TX + lambdaI)
if self.verbose:
print("invert matrix:")
print("time: {}".format(time.asctime(time.localtime(time.time()))))
if self.sparse:
inverted_piece = splin.inv(piece_to_invert)
else:
inverted_piece = np.linalg.inv(piece_to_invert)
# Dot with X^T
if self.verbose:
print("time: {}".format(time.asctime(time.localtime(time.time()))))
print("dot with X^T:")
self.matrix_work = inverted_piece.dot(self.X.T)
assert self.matrix_work.shape == (self.d, self.N)
if self.verbose:
print("train the {} classifiers:".format(self.C))
# Train C classifiers.
self.W = self.matrix_work.dot(self.Y)
if self.verbose:
print("done generating weights.")
assert self.W.shape == (self.d, self.C)
return self.W
def kernelized_optimize(self):
# fact: H^T(HH^T + lambda*I_N) == (lambda*I_d + H^TH)H^T
# instead of inverting a dxd matrix, we invert an nxn matrix.
# So our ridge formula becomes:
# (lambda*I_d + H^TH)^(-1)H^T = H^T(HH^T + lambdaI_N)^(-1)
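        # Inverting an (N, N) system instead of a (d, d) one pays off whenever
        # n_samples is much smaller than n_features.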
if self.sparse:
piece_to_invert = self.X.dot(self.X.T) + sp.identity(self.N)*self.lam
else:
piece_to_invert = self.X.dot(self.X.T) + np.identity(self.N)*self.lam
assert piece_to_invert.shape == (self.N, self.N) # yay!
# invert this NxN matrix.
if self.verbose:
print("invert matrix:")
print("time: {}".format(time.asctime(time.localtime(time.time()))))
if self.sparse:
inverted_piece = splin.inv(piece_to_invert)
else:
inverted_piece = np.linalg.inv(piece_to_invert)
if self.verbose:
print("done inverting via kernel trick at time: {}".format(time.asctime(time.localtime(time.time()))))
# dot with H^T.dot(y)
if self.verbose:
print("dot with H^T at time: {}".format(time.asctime(time.localtime(time.time()))))
self.W = self.X.T.dot(inverted_piece).dot(self.Y)
if self.verbose:
print("done dotting with H^T at time: {}".format(time.asctime(time.localtime(time.time()))))
assert self.W.shape == (self.d, self.C)
def predict(self):
if self.verbose:
print("prediction time.")
if self.W is None:
if self.kernelized:
self.kernelized_optimize()
else:
self.optimize()
Yhat = self.apply_weights()
assert type(Yhat) == np.ndarray
classes = np.argmax(Yhat, axis=1)
if self.sparse:
yhat = np.multiply(self.Y.toarray(), Yhat)
else:
yhat = np.multiply(self.Y, Yhat)
# collapse it into an Nx1 array:
self.yhat = np.amax(yhat, axis=1)
return classes
def run(self):
self.predict()
self.results = pd.DataFrame(self.results_row())
def loss_01(self):
return self.pred_to_01_loss(self.predict())
def results_row(self):
"""
Return a dictionary that can be put into a Pandas DataFrame.
"""
results_row = super(RidgeMulti, self).results_row()
# append on Ridge regression-specific results
more_details = {
"lambda":[self.lam],
"training SSE":[self.sse()],
"training RMSE":[self.rmse()],
"kernelized solvin":[self.kernelized]
}
results_row.update(more_details)
return results_row
def sse(self):
"""
Calculate the sum of squared errors.
In class on 10/26, Sham coached us to include errors for all
classifications in our RMSE (and thus SSE) calculations.
For y = [0, 1], Y=[[0, 1], [1, 0]], Yhat = [[0.01, 0.95], [0.99, 0.03]],
SSE = sum(0.01**2 + 0.05**2 + 0.01**2 + 0.03**2) = RSS
Note: this would not be equivalent to the binary classifier, which
would only sum (0.05**2 + 0.03**2)
My formula before only used the errors for the correct class:
error = self.apply_weights() - self.Y
error = np.multiply(error, self.Y)
error = np.amax(np.abs(error), axis=1)
return error.T.dot(error)
:return: sum of squared errors for all classes for each point (float)
"""
if self.sparse:
error = self.apply_weights() - self.Y.toarray()
assert type(error) == np.ndarray
else:
error = self.apply_weights() - self.Y
return np.multiply(error, error).sum()
def rmse(self):
"""
For the binary classifier, RMSE = (SSE/N)**0.5.
For the multiclass one, SSE is counting errors for all classifiers.
We could use (self.sse()/self.N/self.C)**0.5 to make the RMSE
calcs more similar between the binary and multi-class classifiers,
but they still are not the same, so I won't.
:return: RMSE (float)
"""
return(self.sse()/self.N)**0.5
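# A minimal usage sketch (hypothetical data; `make_data` is not part of this module):
#   X, y = make_data()                        # X: (N, d) features, y: (N,) integer labels
#   model = RidgeMulti(X, y, lam=1e-3, sparse=False)
#   predicted_classes = model.predict()       # argmax over the C one-vs-all ridge scores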
class RidgeBinary(ClassificationBase):
"""
Train *one* ridge model.
"""
def __init__(self, X, y, lam, w=None, test_X=None, test_y = None):
"""
test_X, test_y are for compatibility only, because the questions for
other methods require knowing test data during fitting.
"""
self.X = X
self.N, self.d = X.shape
self.y = y
self.lam = lam
        if w is None:
            w = np.zeros(self.d)
        self.w = w
        assert self.w.shape == (self.d, )
        self.threshold = None
        self.verbose = False  # predict() consults this flag
def get_weights(self):
return self.w
def apply_weights(self):
return self.X.dot(self.w)
def run(self):
# find lambda*I_D + X^T*X
piece_to_invert = np.identity(self.d)*self.lam + self.X.T.dot(self.X)
inverted_piece = np.linalg.inv(piece_to_invert)
solution = inverted_piece.dot(self.X.T)
solution = solution.dot(self.y)
solution = np.squeeze(np.asarray(solution))
assert solution.shape == (self.d, )
self.w = solution
self.results = pd.DataFrame(self.results_row())
def predict(self, threshold):
if self.verbose:
print("dot X with W to make predictions. {}".format(time.asctime(time.localtime(time.time()))))
# TODO: having a default cutoff is a terrible idea!
Yhat = self.X.dot(self.w)
if self.verbose:
print("done dotting. {}".format(time.asctime(time.localtime(time.time()))))
classes = np.zeros(self.N)
classes[Yhat > threshold] = 1
return classes
def loss_01(self, threshold=None):
if threshold is None:
threshold=0.5
print("WARNING: 0/1 loss is calculated for threshold=0.5, which "
"is very likely to be a poor choice!!")
return self.pred_to_01_loss(self.predict(threshold))
def results_row(self):
"""
Return a dictionary that can be put into a Pandas DataFrame.
"""
results_row = super(RidgeBinary, self).results_row()
# append on logistic regression-specific results
more_details = {
"lambda":[self.lam],
"SSE":[self.sse()],
"RMSE":[self.rmse()],
}
results_row.update(more_details)
return results_row
def sse(self):
# sse = RSS
error = self.apply_weights() - self.y
return error.T.dot(error)
def rmse(self):
return(self.sse()/self.N)**0.5
class RidgeRegularizationPath:
""" DEPRECATED """
# TODO: refactor so it uses HyperparameterSweep class
def __init__(self, train_X, train_y, lam_max, frac_decrease, steps,
val_X, val_y):
self.train_X = train_X
self.train_y = train_y
self.train_N, self.train_d = train_X.shape
self.lam_max = lam_max
self.frac_decrease = frac_decrease
self.steps = steps
self.val_X = val_X
self.val_y = val_y
def train_with_lam(self, lam):
rr = RidgeBinary(self.train_X, self.train_y, lam=lam)
        rr.run()
        sse_train = rr.sse()
        # swap in the validation data and get the validation sse
        rr.X = self.val_X
        rr.y = self.val_y
        sse_val = rr.sse()
        assert rr.w.shape == (self.train_d, )  # sanity-check the weight shape
        return rr.w, sse_train, sse_val
def walk_path(self):
# protect the first value of lambda.
lam = self.lam_max/self.frac_decrease
# initialize a dataframe to store results in
results = pd.DataFrame()
for c in range(0, self.steps):
lam = lam*self.frac_decrease
print("Loop {}: solving weights. Lambda = {}".format(c+1, lam))
w, sse_train, sse_val = self.train_with_lam(lam)
one_val = pd.DataFrame({"lam":[lam],
"weights":[w],
"SSE (training)": [sse_train],
"SSE (validaton)": [sse_val]})
results = pd.concat([results, one_val])
self.results_df = results
|
mit
|
hrjn/scikit-learn
|
sklearn/linear_model/stochastic_gradient.py
|
16
|
50617
|
# Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..utils import check_array, check_random_state, check_X_y
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from ..utils import deprecated
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
    Returns y, coef, intercept, average_coef, average_intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function_,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function_, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.n_jobs = int(n_jobs)
@property
@deprecated("Attribute loss_function was deprecated in version 0.19 and "
"will be removed in 0.21. Use 'loss_function_' instead")
def loss_function(self):
return self.loss_function_
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if getattr(self, "coef_", None) is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function_ = self._get_loss_function(loss)
if not hasattr(self, "t_"):
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and hasattr(self, "coef_"):
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
loss_function_ : concrete ``LossFunction``
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if getattr(self, "coef_", None) is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and getattr(self, "average_coef_", None) is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and getattr(self, "coef_", None) is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if not hasattr(self, "t_"):
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
|
bsd-3-clause
|
chandrasg/chandrasg.github.io
|
markdown_generator/talks.py
|
199
|
4000
|
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV; an example row is sketched below.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
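# As a purely illustrative example (made-up values, not from a real dataset), a minimal
# `talks.tsv` could contain a header row plus one entry, with columns separated by tabs:
#
#   title <TAB> type <TAB> url_slug <TAB> venue <TAB> date <TAB> location <TAB> talk_url <TAB> description
#   Example talk <TAB> Talk <TAB> example-talk <TAB> Example University <TAB> 2014-03-01 <TAB> Testville, USA <TAB> <TAB> A short description.
#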
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML-encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
# date is a required field, so it is always written
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
|
mit
|
LaboratoireMecaniqueLille/Comedi
|
v_0.18.py
|
1
|
14604
|
# -*- coding: utf-8 -*-
#import threading
import comedi as c
import time
import numpy as np
import scipy.interpolate as scipy_interpolate
import matplotlib.pyplot as plt
#import sys
#import random
from multiprocessing import Process, Pipe#, Array, Value
import copy
import math
#import datetime
np.set_printoptions(threshold='nan', linewidth=500)
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
class Path:
def __init__(self,path_file,step):
self.path_file=path_file
self.step=step
self.xp=np.arange(0,6,0.05)
self.yp=0.5*np.sin(frequence*self.xp)+0.6
self.degp=0.2*np.sin(frequence*self.xp)+0.6
#with open(self.path_file,'r') as path:
#self.coordinate=np.array([[float(x) for x in ln.split()] for ln in path]) # transforms raw data in an array of float
#self.xp=self.coordinate[:,0]
#self.yp=self.coordinate[:,1]
#self.degp=self.coordinate[:,2]
self.x=np.arange(np.min(self.xp),np.max(self.xp)+self.step,self.step) # create an array for the x parameter
def interpolate_linear(self):
self.y=np.interp(self.x,self.xp,self.yp)
self.deg=np.interp(self.x,self.xp,self.degp)
def interpolate_spline(self):
spline=scipy_interpolate.InterpolatedUnivariateSpline(self.xp,self.yp)
self.y=spline(self.x)
spline_deg=scipy_interpolate.InterpolatedUnivariateSpline(self.xp,self.degp)
self.deg=spline_deg(self.x)
class Out:
def __init__(self, device='/dev/comedi0',subdevice=1,channel=0,range_num=1,gain=1,offset=0):
self.subdevice=subdevice
self.channel=channel
self.range_num=range_num
self.device0=c.comedi_open(device)
self.maxdata=c.comedi_get_maxdata(self.device0,self.subdevice,self.channel)
self.range_ds=c.comedi_get_range(self.device0,self.subdevice,self.channel,self.range_num)
self.out=0
self.gain=gain
self.offset=offset
self.I_term=0
self.last_sensor_input=0
self.K=K
self.Ki=Ki
self.Kd=Kd
self.last_time=t0
self.out_min=0
self.out_max=4.095
self.last_output=0
def set_(self,wanted_position):
self.out=(wanted_position-self.offset)/self.gain
out_a=c.comedi_from_phys(self.out,self.range_ds,self.maxdata) # convert the wanted_position
c.comedi_data_write(self.device0,self.subdevice,self.channel,self.range_num,c.AREF_GROUND,out_a) # send the signal to the controller
#t_=datetime.datetime.now()
#t=(((((((t_.year*12)+t_.month)*30+t_.day)*24+t_.hour)*60+t_.minute)*60+t_.second)*1000000)+t_.microsecond
t=time.time()
#return (t-t0,self.out)
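# Sketch of the PID update performed in set_PID below (a restatement of the
# code that follows, not extra behaviour):
#   error   = (wanted_position - offset)/gain - sensor_input
#   I_term += Ki * error * (last_time - time)   (then clamped to [out_min, out_max])
#   out_PID = last_output + K*error + I_term - Kd*(sensor_input - last_sensor_input)/(last_time - time)
# out_PID is clamped to [out_min, out_max], converted with comedi_from_phys and written to the card.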
def set_PID(self,wanted_position,sensor_input):
self.time= time.time()
self.out=(wanted_position-self.offset)/self.gain
#print "sensor=%s" %sensor_input
self.error=self.out-sensor_input
self.I_term += self.Ki*self.error*(self.last_time-self.time)
if self.I_term>self.out_max:
self.I_term=self.out_max
elif self.I_term<self.out_min:
self.I_term=self.out_min
self.out_PID=self.last_output+self.K*self.error+self.I_term-self.Kd*(sensor_input-self.last_sensor_input)/(self.last_time-self.time)
if self.out_PID>self.out_max:
self.out_PID=self.out_max
elif self.out_PID<self.out_min:
self.out_PID=self.out_min
self.last_time=copy.copy(self.time)
self.last_sensor_input=copy.copy(sensor_input)
self.last_output=copy.copy(self.out_PID)
#self.t.append(time.time()-t0)
#print "I_term= %s, out_PID=%s" %(self.I_term, self.out_PID)
out_a=c.comedi_from_phys(self.out_PID,self.range_ds,self.maxdata) # convert the wanted_position
c.comedi_data_write(self.device0,self.subdevice,self.channel,self.range_num,c.AREF_GROUND,out_a) # send the signal to the controller
#t_=datetime.datetime.now()
#t=(((((((t_.year*12)+t_.month)*30+t_.day)*24+t_.hour)*60+t_.minute)*60+t_.second)*1000000)+t_.microsecond
t=time.time()
return (t-t0,self.out_PID)
class In:
def __init__(self,device='/dev/comedi0',subdevice=0,channel=1,range_num=0,gain=1,offset=0):
self.subdevice=subdevice
self.channel=channel
self.range_num=range_num
self.device0=c.comedi_open(device)
self.maxdata=c.comedi_get_maxdata(self.device0,self.subdevice,self.channel)
self.range_ds=c.comedi_get_range(self.device0,self.subdevice,self.channel,self.range_num)
self.gain=gain
self.offset=offset
#self.y=[]
#self.t=[]
def get(self):
data = c.comedi_data_read(self.device0,self.subdevice,self.channel,self.range_num, c.AREF_GROUND)
self.position=(c.comedi_to_phys(data[1],self.range_ds,self.maxdata)*self.gain+self.offset)
t=time.time()
#t_=datetime.datetime.now()
#t=(((((((t_.year*12)+t_.month)*30+t_.day)*24+t_.hour)*60+t_.minute)*60+t_.second)*1000000)+t_.microsecond
return ((t-t0), self.position)
if __name__ == '__main__':
################ THIS SECTION IS FOR INIT AND PARAMETERS ################
step = 0.002 # choose step here
# set PID parameters below :
K=1.0
Ki=0.
Kd=0.0
frequence=5 # "frequency" of the sinus used for tests in the Path class.
acquisition_step=500 # Define how many points the script waits for before saving them
saving_step=1 # Allows you to save only 1 point out of every "saving_step" points: use this parameter for high frequencies and long durations.
Path=Path(path_file='/media/corentin/data/Git/Comedi/Chemin', step=step) # open file 'Chemin', which has to be in the same directory
Path.interpolate_linear() #choose here for interpolation method
#Path.interpolate_spline() #
#t0_=datetime.datetime.now() # set a common time for all channels
#t0=(((((((t0_.year*12)+t0_.month)*30+t0_.day)*24+t0_.hour)*60+t0_.minute)*60+t0_.second)*1000000)+t0_.microsecond
t0=time.time()
################ START YOUR PROGRAM FORM HERE: ################
################ Initialise your classes here:
# set subdevice to 1 for outputs and 0 for inputs.
# set channel according to your connection to the comedi card
# range_num defines the range for the A/D and D/A converters, depending on whether your input/output is bipolar or not. See the documentation for values.
Rotate=Out(device='/dev/comedi0',subdevice=1,channel=1,range_num=1,gain=1,offset=0)
Move=Out(device='/dev/comedi0',subdevice=1,channel=0,range_num=1,gain=1,offset=0)
Angle=In(device='/dev/comedi0',subdevice=0,channel=1,range_num=0,gain=1,offset=0)
Position=In(device='/dev/comedi0',subdevice=0,channel=0,range_num=0,gain=1,offset=0)
################ Variables used to share the in and out data. You need one for each variable in each input/output
traction_sensor_send, traction_sensor_recv = Pipe()
traction_time_send, traction_time_recv = Pipe()
torsion_sensor_send, torsion_sensor_recv = Pipe()
torsion_time_send, torsion_time_recv = Pipe()
pipe_send,pipe_recv=Pipe() # This one transfert data from save to graph
################ Functions:
def f(I,O,path_x,path_y,time_pipe,sensor_pipe): # Main function, allows you to control one actuator.
for i in range(len(path_x)):
t1=time.time()
while t1<(t0+(path_x[i])): # Waits for the time set in the path file
t1=time.time()
a,b=(I()) # measuring the position and saving it in the shared variables
c,d=(O(path_y[i], b)) # setting the position to a new value and saving it in the shared variables
time_pipe.send(a) # send data to the save function
sensor_pipe.send(b)
time_pipe.send(0.0) # signal that the acquisition is over
sensor_pipe.send(0.0)
### This is an alternative to interpolate on the go
#t1=time.time()-t0
#if t1<= path_x[-1]:
#y=np.interp(t1,path_x,path_y)
##while t1<(t0+(path_x[i])): # Waits for the time set in the path file
##t1=time.time()
#a,b=(I()) # measuring the position and saving it in the shared variables
#c,d=(O(y, b)) # setting the position to a new value and saving it in the shared variables
#time_pipe.send(a) # send data to the save function
#sensor_pipe.send(b)
def save(*args): # This function saves data in a file and displays it in an animated plot
nbr=len(args)
### INIT
condition=True
save_number=0
### Main loop
while condition==True:
## init data matrixes
data=[[0 for x in xrange(acquisition_step)] for x in xrange(nbr)]
i=0
## This loop fills the data matrix with up to "acquisition_step" values
while i<acquisition_step and condition==True:
for z in range (nbr):
data[z][i]=args[z].recv()
if data[0][i]==0.0: # if acquisition is over, save remaining data
condition=False
i+=1
## send data to plot
pipe_send.send(data)
## The following loops are used to save the data
fo=open("log.txt","a") # "a" for appending
fo.seek(0,2) # place the "cursor" at the end of the file, so new writes do not erase the previous ones
data_to_save=""
data1=np.empty((np.shape(np.array(data))[0],int(math.ceil(len(data[0])//saving_step))))
if saving_step>1: # This branch averages the data to reduce the number of points to save (e.g. with saving_step=5, each group of 5 consecutive samples is stored as its mean)
for x in range(int(math.ceil(len(data[0])//saving_step))): # euclidean division here
for i in range(np.shape(np.array(data))[0]):
if x<(len(data[0])//saving_step):
data1[i][x]=(np.mean(data[i][x*saving_step:(x+1)*saving_step]))
else:
data1[i][x]=(np.mean(data[i][x*saving_step:]))
data_to_save=str(np.transpose(data1))+"\n"
else: # this branch saves all data
data_to_save=str(np.transpose(data))+"\n"
fo.write(data_to_save)
fo.close()
save_number+=1
def plot(graph_recv_n,nbr_graphs): # plot up to 3 different graphs in one figure, keeping a fixed abscissa range. On update, old plots are erased and new ones are added. No memory overload: this plot is safe even for long runs.
condition=True
save_number=0
## init the plot
fig=plt.figure()
ax=fig.add_subplot(111)
li,= ax.plot(np.arange(5000),np.zeros(5000))
if nbr_graphs ==2: # add a 2nd graph in the same plot
lo,= ax.plot(np.arange(5000),np.zeros(5000))
if nbr_graphs ==3: # add a 3rd graph
la,= ax.plot(np.arange(5000),np.zeros(5000))
#ax.set_ylim(0,1.2)
fig.canvas.draw() # draw and show it
plt.show(block=False)
nbr=nbr_graphs*2
var=[[]]*nbr
while condition==True:
data=graph_recv_n.recv()
## this loop is used for the continuous plotting
if save_number>0:
if save_number==1: # this branch initializes the first round of data
for z in range(nbr):
var[z]=copy.copy(data[z])
if save_number<6 and save_number>1: # This integer defines the size of the plot: it plots "x" times the data.
for z in range(nbr):
var[z]=copy.copy(np.concatenate((var[z],(data[z])),axis=1))
else : # this branch deletes the first values of the plot and adds new values at the end to create a continuous plot
for z in range(nbr):
var[z][:-np.shape(np.array(data))[1]] = var[z][np.shape(np.array(data))[1]:]
var[z][-np.shape(np.array(data))[1]:]= data[z]
li.set_xdata(var[0])
li.set_ydata(var[1]) # update the graph values
if nbr_graphs ==2:
lo.set_xdata(var[2])
lo.set_ydata(var[3])
if nbr_graphs ==3:
la.set_xdata(var[4])
la.set_ydata(var[5])
ax.relim()
ax.autoscale_view(True,True,True)
fig.canvas.draw()
save_number+=1
def plot_value_value(graph_recv_n,order): # This function plots one or two graphs of y=f(x); you can choose y and x via the order variable. Autoscales, but doesn't reset. BEWARE, long plots may cause data loss.
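# 'order' lists the indices of the data rows to plot (x index then y index) and its last
# element holds the axis labels, e.g. [0, 1, ['Time (s)', 'Value (unit)']] plots
# data[0] against data[1] with those labels.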
condition=True
nbr=len(order) # number of variables
plt.ion()
fig=plt.figure()
while condition==True:
data=graph_recv_n.recv() # receive data from main graph process
plt.plot(data[order[0]],data[order[1]],'b-')
plt.xlabel(order[-1][0])
plt.ylabel(order[-1][1])
if nbr ==4:
plt.plot(data[order[2]],data[order[3]],'r-')
plt.draw()
def graph(*args): # This function has to be called in a process. It creates the desired plots and updates their data in sync with the save function.
condition=True
graph_send={}
graph_recv={}
graph_n={}
data=pipe_recv.recv() # this pipe receive data from the save function
nbr_graphs=len(args)
for i in range(nbr_graphs):
graph_type=args[i][0] # the first value of args[i] is the graph type
graph_args=args[i][1:] # other values depend on the graph type
graph_send[i],graph_recv[i]=Pipe() #creating pipes to communicate with the graphs to be created
if graph_type=='values':
graph_send[i].send(data) #init the pipe
graph_n[i]=Process(target=plot_value_value,args=(graph_recv[i],graph_args)) # creating a new process for each graph
if graph_type=='time':
graph_send[i].send(data)#init the pipe
graph_n[i]=Process(target=plot,args=(graph_recv[i],graph_args[0]))# creating a new process for each graph
graph_n[i].start() #start graphs processes
while condition==True: # this loop will feed the pipes with new data received from the save process.
data=pipe_recv.recv()
for i in range(nbr_graphs):
graph_send[i].send(data)
def test_signal(): ## debug function, deprecated
while signal[0]!=2:
print "signal[0]=%s , signal[1]=%s" %(signal[0],signal[1])
time.sleep(0.1)
try:
################ This part is used to define the processes:
Traction=Process(target=f,args=(Position.get,Move.set_PID,Path.x,Path.y,traction_time_send,traction_sensor_send))
Rotation=Process(target=f,args=(Angle.get,Rotate.set_PID,Path.x,Path.deg,torsion_time_send,torsion_sensor_send))
Save=Process(target=save,args=(traction_time_recv,traction_sensor_recv,torsion_time_recv,torsion_sensor_recv))
Graph=Process(target=graph,args=([['time',2],['values',0,1,2,3,['Time (s)','Value (unit)']]]))
################ This part is used to start the processes
Save.start()
Traction.start()
Rotation.start()
Graph.start()
Save.join()
Traction.join()
Rotation.join()
Graph.join()
except (KeyboardInterrupt):
################ This part is used to terminate the processes once they are done. DO NOT FORGET this part or the process will keep running on your computer.
Save.terminate()
print "save terminated"
Traction.terminate()
print "traction terminated"
Rotation.terminate()
print "torsion terminated"
Graph.terminate()
print "graph terminated"
|
gpl-2.0
|
B3AU/waveTree
|
sklearn/svm/classes.py
|
5
|
28558
|
from .base import BaseLibLinear, BaseSVC, BaseLibSVM
from ..base import RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin
from ..feature_selection.from_model import _LearntSelectorMixin
class LinearSVC(BaseLibLinear, LinearClassifierMixin, _LearntSelectorMixin,
SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better (to large numbers of
samples).
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'l1' or 'l2' (default='l2')
Specifies the loss function. 'l1' is the hinge loss (standard SVM)
while 'l2' is the squared hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria
multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads to
better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : int, default: 0
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
`coef_` : array, shape = [n_features] if n_classes == 2 \
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that \
follows the internal memory layout of liblinear.
`intercept_` : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
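Examples
--------
A minimal usage sketch (the toy data below is for illustration only):
>>> import numpy as np
>>> from sklearn.svm import LinearSVC
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> clf = LinearSVC().fit(X, y)
>>> print(clf.predict([[-0.8, -1]]))
[1]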
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. Furthermore
SGDClassifier is scalable to large number of samples as it uses
a Stochastic Gradient Descent optimizer.
Finally SGDClassifier can fit both dense and sparse data without
memory copy if the input is C-contiguous or CSR.
"""
def __init__(self, penalty='l2', loss='l2', dual=True, tol=1e-4, C=1.0,
multi_class='ovr', fit_intercept=True, intercept_scaling=1,
class_weight=None, verbose=0, random_state=None):
super(LinearSVC, self).__init__(
penalty=penalty, loss=loss, dual=dual, tol=tol, C=C,
multi_class=multi_class, fit_intercept=fit_intercept,
intercept_scaling=intercept_scaling,
class_weight=class_weight, verbose=verbose,
random_state=random_state)
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples, which makes it hard
to scale to datasets with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each,
see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
.. The narrative documentation is available at http://scikit-learn.org/
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default=0.0)
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 0.0 then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [n_SV, n_features]
Support vectors.
`n_support_` : array-like, dtype=int32, shape = [n_class]
number of support vector for each class.
`dual_coef_` : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in the \
SVM section of the User Guide for details.
`coef_` : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
gamma=0.0, kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, random_state=None):
super(SVC, self).__init__(
'c_svc', kernel, degree, gamma, coef0, tol, C, 0., 0., shrinking,
probability, cache_size, class_weight, verbose, max_iter,
random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [n_SV, n_features]
Support vectors.
`n_support_` : array-like, dtype=int32, shape = [n_class]
number of support vector for each class.
`dual_coef_` : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function. \
For multiclass, coefficient for all 1-vs-1 classifiers. \
The layout of the coefficients in the multiclass case is somewhat \
non-trivial. See the section about multi-class classification in \
the SVM section of the User Guide for details.
`coef_` : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma=0.0,
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, verbose=False, max_iter=-1,
random_state=None):
super(NuSVC, self).__init__(
'nu_svc', kernel, degree, gamma, coef0, tol, 0., nu, 0., shrinking,
probability, cache_size, None, verbose, max_iter, random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficients of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma=0.0,
kernel='rbf', max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
C=1.0, epsilon=0.1, shrinking=True, probability=False,
cache_size=200, verbose=False, max_iter=-1,
random_state=None):
super(SVR, self).__init__(
'epsilon_svr', kernel, degree, gamma, coef0, tol, C, 0., epsilon,
shrinking, probability, cache_size, None, verbose,
max_iter, random_state)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces with the parameter epsilon of SVR.
The implementations is a based on libsvm.
Parameters
----------
C : float, optional (default=1.0)
penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
degree of kernel function
is significant only in poly, rbf, sigmoid
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional (default=0.0)
independent term in kernel function. It is only significant
in poly/sigmoid.
probability: boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking: boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficients of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
max_iter=-1, nu=0.1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma=0.0, coef0=0.0, shrinking=True,
probability=False, tol=1e-3, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(NuSVR, self).__init__(
'nu_svr', kernel, degree, gamma, coef0, tol, C, nu, 0., shrinking,
probability, cache_size, None, verbose, max_iter, random_state)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outliers Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional
Degree of kernel function. Significant only in poly, rbf, sigmoid.
gamma : float, optional (default=0.0)
kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
will be taken.
coef0 : float, optional
Independent term in kernel function. It is only significant in
poly/sigmoid.
tol : float, optional
Tolerance for stopping criterion.
shrinking: boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB)
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
`support_` : array-like, shape = [n_SV]
Index of support vectors.
`support_vectors_` : array-like, shape = [nSV, n_features]
Support vectors.
`dual_coef_` : array, shape = [n_classes-1, n_SV]
Coefficient of the support vector in the decision function.
`coef_` : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`
`intercept_` : array, shape = [n_classes-1]
Constants in decision function.
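Examples
--------
A minimal sketch on made-up data (illustrative only); ``predict`` returns
+1 for inliers and -1 for outliers:
>>> import numpy as np
>>> from sklearn.svm import OneClassSVM
>>> X = np.array([[0., 0.], [0.1, 0.], [0., 0.1], [5., 5.]])
>>> clf = OneClassSVM(nu=0.5, gamma=0.1).fit(X)
>>> labels = clf.predict(X)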
"""
def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
nu=0.5, shrinking=True, cache_size=200, verbose=False,
max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
**params)
return self
|
bsd-3-clause
|
SeanCameronConklin/aima-python
|
submissions/Hawley/kmeans.py
|
13
|
1586
|
import numpy as np
import matplotlib.pyplot as plt
# import fileinput
N = 100 # number of observations / 'points'
K = 4 # number of categories / 'means'
P = 10 # plot interval
def distance(x1,y1,x2,y2): # pythagorean distance
return np.sqrt( (x2-x1)**2 + (y2-y1)**2)
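# e.g. distance(0, 0, 3, 4) == 5.0 (a 3-4-5 right triangle)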
# Fancy data structure: We will group points by common indices in separate arrays,
# i.e. the first point has coordinates (points_x[0], points_y[0])
points_x = np.random.rand(N) # points are random on [0,1]
points_y = np.random.rand(N)
colors = np.random.rand(N).astype(int) # colors will show who belongs to which mean
means_x = np.random.rand(K) # initialize means w/ random numbers on [0,1]
means_y = np.random.rand(K)
fig = plt.figure()
iterations = 100
for i in range(iterations):
# loop over all points: figure out who belongs to which means (assign colors)
for j in range(N):
min_dist = 99999.9 # big number
for m in range(K): # loop over all means
dist = distance(points_x[j], points_y[j], means_x[m], means_y[m])
if (dist < min_dist): # then update the color
min_dist = dist
colors[j] = m
#re-evaluate means
for m in range(K):
inds = np.where( m == colors) # indices of everybody belonging to one mean
means_x[m] = np.mean(points_x[inds]) # take the mean of the x-values in the group
means_y[m] = np.mean(points_y[inds]) # take the mean of the y-values in the group
# Update the picture
if(not i % P):
plt.scatter(points_x, points_y, c=colors, s=50, alpha=0.7)
plt.show()
# print('Proceed', '?')
# proceed = fileinput.input()
|
mit
|
ngoix/OCRF
|
examples/ensemble/plot_gradient_boosting_regression.py
|
87
|
2510
|
"""
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
|
bsd-3-clause
|
redreamality/deeppy
|
setup.py
|
16
|
2509
|
#!/usr/bin/env python
import os
import re
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
with open('requirements.txt') as f:
install_requires = [l.strip() for l in f]
version = None
regex = re.compile(r'''^__version__ = ['"]([^'"]*)['"]''')
with open(os.path.join('deeppy', '__init__.py')) as f:
for line in f:
mo = regex.search(line)
if mo is not None:
version = mo.group(1)
break
if version is None:
raise RuntimeError('Could not find version number')
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import subprocess
subprocess.call(['py.test'] + self.pytest_args + ['test'])
class Coverage(Command):
description = 'Generate a test coverage report.'
user_options = [('report=', 'r', 'Report type (report/html)')]
def initialize_options(self):
self.report = 'report'
def finalize_options(self):
pass
def run(self):
import subprocess
subprocess.call(['coverage', 'run', '--source=deeppy', '-m', 'py.test',
'test'])
subprocess.call(['coverage', self.report])
setup(
name='deeppy',
version=version,
author='Anders Boesen Lindbo Larsen',
author_email='[email protected]',
description='Deep learning in Python',
license='MIT',
url='http://compute.dtu.dk/~abll',
packages=find_packages(exclude=['doc', 'examples', 'test']),
install_requires=install_requires,
long_description=read('README.md'),
cmdclass={
'test': PyTest,
'coverage': Coverage,
},
extras_require={
'test': ['pytest', 'sklearn'],
'coverage': ['pytest', 'sklearn', 'coverage'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
],
)
|
mit
|
wibeasley/PyCap
|
redcap/project.py
|
2
|
22048
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Scott Burns <[email protected]>'
__license__ = 'MIT'
__copyright__ = '2014, Vanderbilt University'
import json
from .request import RCRequest, RedcapError, RequestException
class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name='', verify_ssl=True):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
try:
self.metadata = self.__md()
except RequestException:
raise RedcapError("Exporting metadata failed. Check your URL and token.")
self.field_names = self.filter_metadata('field_name')
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata('field_label')
self.forms = tuple(set(c['form_name'] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0]
if 'error' in ev_data:
events = tuple([])
arm_nums = tuple([])
arm_names = tuple([])
else:
events = ev_data
arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0]
arm_nums = tuple([a['arm_num'] for a in arm_data])
arm_names = tuple([a['name'] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl('metadata')
p_l['content'] = 'metadata'
return self._call_api(p_l, 'metadata')[0]
def __basepl(self, content, rec_type='flat', format='json'):
"""Return a dictionary which can be used as is or added to for
payloads"""
d = {'token': self.token, 'content': content, 'format': format}
if content not in ['metadata', 'file']:
d['type'] = rec_type
return d
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {'verify': self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format='json', df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml; ``'df'`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == 'df':
from StringIO import StringIO
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('formEventMapping', format=ret_format)
to_add = [arms]
str_add = ['arms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'exp_fem')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
return read_csv(StringIO(response))
else:
return read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format='json',
df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata structure for the project.
"""
ret_format = format
if format == 'df':
from StringIO import StringIO
from pandas import read_csv
ret_format = 'csv'
pl = self.__basepl('metadata', format=ret_format)
to_add = [fields, forms]
str_add = ['fields', 'forms']
for key, data in zip(str_add, to_add):
if data:
pl[key] = ','.join(data)
response, _ = self._call_api(pl, 'metadata')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
df_kwargs = {'index_col': 'field_name'}
return read_csv(StringIO(response), **df_kwargs)
def export_records(self, records=None, fields=None, forms=None,
events=None, raw_or_label='raw', event_name='label',
format='json', export_survey_fields=False,
export_data_access_groups=False, df_kwargs=None):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
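Examples
--------
A hypothetical call (the URL, token and field name below are placeholders,
not real values):
>>> proj = Project('https://redcap.example.org/api/', 'SOME_API_TOKEN')
>>> subset = proj.export_records(fields=['age'], format='df')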
"""
ret_format = format
if format == 'df':
from pandas import read_csv
from StringIO import StringIO
ret_format = 'csv'
pl = self.__basepl('record', format=ret_format)
keys_to_add = (records, fields, forms, events,
raw_or_label, event_name, export_survey_fields,
export_data_access_groups)
str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel',
'eventName', 'exportSurveyFields', 'exportDataAccessGroups')
for key, data in zip(str_keys, keys_to_add):
if data:
# Make a url-ok string
if key in ('fields', 'records', 'forms', 'events'):
pl[key] = ','.join(data)
else:
pl[key] = data
response, _ = self._call_api(pl, 'exp_record')
if format in ('json', 'csv', 'xml'):
return response
elif format == 'df':
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {'index_col': [self.def_field,
'redcap_event_name']}
else:
df_kwargs = {'index_col': self.def_field}
buf = StringIO(response)
df = read_csv(buf, **df_kwargs)
buf.close()
return df
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(field_name,
'text_validation_type_or_show_slider_number')
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
mf = ''
try:
mf = str([f[key] for f in self.metadata
if f['field_name'] == field][0])
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return mf
else:
return mf
def filter(self, query, output_fields=None):
"""Query the database and return subject information for those
who match the query logic
Parameters
----------
query: Query or QueryGroup
Query(Group) object to process
output_fields: list
The fields desired for matching subjects
Returns
-------
A list of dictionaries whose keys contains at least the default field
and at most each key passed in with output_fields, each dictionary
representing a surviving row in the database.
"""
query_keys = query.fields()
if not set(query_keys).issubset(set(self.field_names)):
raise ValueError("One or more query keys not in project keys")
query_keys.append(self.def_field)
data = self.export_records(fields=query_keys)
matches = query.filter(data, self.def_field)
if matches:
# if output_fields is empty, we'll download all fields, which is
# not desired, so we limit download to def_field
if not output_fields:
output_fields = [self.def_field]
# But if caller passed a string and not list, we need to listify
if isinstance(output_fields, basestring):
output_fields = [output_fields]
return self.export_records(records=matches, fields=output_fields)
else:
# If there are no matches, then sending an empty list to
# export_records will actually return all rows, which is not
# what we want
return []
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print('%s --> %s' % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(self, to_import, overwrite='normal', format='json',
return_format='json', return_content='count',
date_format='YMD'):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
                Keys of the dictionaries should be a subset of the project's
                fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
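        Examples
        --------
        A minimal, hypothetical sketch; ``project`` is assumed to be a
        configured ``Project`` instance and ``'record_id'``/``'age'`` are
        placeholder field names.
        >>> new_records = [{'record_id': '3', 'age': '42'}]
        >>> response = project.import_records(new_records)
        >>> # by default the response is a dict such as {'count': 1}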
"""
pl = self.__basepl('record')
if hasattr(to_import, 'to_csv'):
# We'll assume it's a df
from StringIO import StringIO
buf = StringIO()
if self.is_longitudinal():
csv_kwargs = {'index_label': [self.def_field,
'redcap_event_name']}
else:
csv_kwargs = {'index_label': self.def_field}
to_import.to_csv(buf, **csv_kwargs)
pl['data'] = buf.getvalue()
buf.close()
format = 'csv'
elif format == 'json':
pl['data'] = json.dumps(to_import, separators=(',', ':'))
else:
# don't do anything to csv/xml
pl['data'] = to_import
pl['overwriteBehavior'] = overwrite
pl['format'] = format
pl['returnFormat'] = return_format
pl['returnContent'] = return_content
pl['dateFormat'] = date_format
response = self._call_api(pl, 'imp_record')[0]
if 'error' in response:
raise RedcapError(str(response))
return response
def export_file(self, record, field, event=None, return_format='json'):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
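        Examples
        --------
        A hypothetical sketch; ``project``, the field name ``'consent_pdf'``
        and the output filename are placeholders.
        >>> content, content_map = project.export_file('1', 'consent_pdf')
        >>> with open('exported_file', 'wb') as f:
        ...     f.write(content)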
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# there's no format field in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'export'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
content, headers = self._call_api(pl, 'exp_file')
#REDCap adds some useful things in content-type
if 'content-type' in headers:
splat = [kv.strip() for kv in headers['content-type'].split(';')]
kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv
in splat if '=' in kv]
content_map = dict(kv)
else:
content_map = {}
return content, content_map
def import_file(self, record, field, fname, fobj, event=None,
return_format='json'):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
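        Examples
        --------
        A hypothetical sketch, mirroring ``export_file``; ``project`` and the
        field name ``'consent_pdf'`` are placeholders.
        >>> with open('consent.pdf', 'rb') as fobj:
        ...     project.import_file('1', 'consent_pdf', 'consent.pdf', fobj)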
"""
self._check_file_field(field)
# load up payload
pl = self.__basepl(content='file', format=return_format)
# no format in this call
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'import'
pl['field'] = field
pl['record'] = record
if event:
pl['event'] = event
file_kwargs = {'files': {'file': (fname, fobj)}}
return self._call_api(pl, 'imp_file', **file_kwargs)[0]
def delete_file(self, record, field, return_format='json', event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
pl = self.__basepl(content='file', format=return_format)
del pl['format']
pl['returnFormat'] = return_format
pl['action'] = 'delete'
pl['record'] = record
pl['field'] = field
if event:
pl['event'] = event
return self._call_api(pl, 'del_file')[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, 'field_type') == 'file'
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
else:
return True
def export_users(self, format='json'):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
pl = self.__basepl(content='user', format=format)
return self._call_api(pl, 'exp_user')[0]
|
mit
|
mtrbean/scipy
|
scipy/signal/spectral.py
|
14
|
34751
|
"""Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import lombscargle
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram']
def periodogram(x, fs=1.0, window=None, nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is an array it will be used
directly as the window. Defaults to None; equivalent to 'boxcar'.
nfft : int, optional
Length of the FFT used. If None the length of `x` will be used.
detrend : str or function or False, optional
Specifies how to detrend `x` prior to computing the spectrum. If
`detrend` is a string, it is passed as the ``type`` argument to
`detrend`. If it is a function, it should return a detrended array.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
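    # A periodogram is a single-segment Welch estimate: choose nperseg so the
    # whole (possibly truncated) signal forms one segment with zero overlap.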
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral density
by dividing the data into overlapping segments, computing a modified
periodogram for each segment and averaging the periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method [2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
"""
Estimate the cross power spectral density, Pxy, using Welch's method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
    -----
By convention, Pxy is computed with the conjugate FFT of X multiplied by
the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
    overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=256, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 8``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds to the
segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to welch's method, where the entire
data stream is averaged over, one may wish to use a smaller overlap (or
perhaps none at all) when computing a spectrogram, to maintain some
statistical independence between individual segments.
.. versionadded:: 0.16.0
References
----------
    .. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time
Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency linearly changes
with time from 1kHz to 2kHz, corrupted by 0.001 V**2/Hz of white noise
sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> freq = np.linspace(1e3, 2e3, N)
>>> x = amp * np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
    # Less overlap than welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
freqs, time, Pxy = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided, scaling,
axis, mode='psd')
return freqs, time, Pxy
def coherence(x, y, fs=1.0, window='hanning', nperseg=256, noverlap=None,
nfft=None, detrend='constant', axis=-1):
"""
Estimate the magnitude squared coherence estimate, Cxy, of discrete-time
signals X and Y using Welch's method.
Cxy = abs(Pxy)**2/(Pxx*Pyy), where Pxx and Pyy are power spectral density
estimates of X and Y, and Pxy is the cross spectral density estimate of X
and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap: int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
    -----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default 'hanning' window an
    overlap of 50% is a reasonable trade off between accurately estimating
the signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of Signals"
Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
def _spectral_helper(x, y, fs=1.0, window='hanning', nperseg=256,
noverlap=None, nfft=None, detrend='constant',
return_onesided=True, scaling='spectrum', axis=-1,
mode='psd'):
'''
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between the
psd, csd, and spectrogram functions. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Parameters
    ----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
        the same object in memory as x (i.e. _spectral_helper(x, x, ...)),
the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hanning'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
freqs : ndarray
Array of sample frequencies.
result : ndarray
        Array of output data, contents dependent on *mode* kwarg.
t : ndarray
Array of times corresponding to each data segment
References
----------
stackoverflow: Rolling window for 1D arrays in Numpy?
<http://stackoverflow.com/a/6811241>
stackoverflow: Using strides for an efficient moving average filter
<http://stackoverflow.com/a/4947453>
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
'''
if mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
        raise ValueError("Unknown value for mode %s, must be one of: "
                         "'psd', 'complex', 'magnitude', 'angle', 'phase'"
                         % mode)
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x,y,np.complex64)
else:
outdtype = np.result_type(x,np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
    # Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
# X and Y are same length now, can test nperseg with either
if x.shape[-1] < nperseg:
        warnings.warn('nperseg = {0:d} is greater than input length = {1:d}, '
                      'using nperseg = {1:d}'.format(nperseg, x.shape[-1]))
nperseg = x.shape[-1]
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
else:
noverlap = int(noverlap)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
if np.result_type(win,np.complex64) != outdtype:
win = win.astype(outdtype)
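    # Window normalization: 'density' scales by 1/(fs * sum(win**2)) so Pxx
    # has units of V**2/Hz, while 'spectrum' scales by 1/sum(win)**2 (the
    # squared coherent gain of the window) so Pxx has units of V**2.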
if mode == 'psd':
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
else:
scale = 1
if return_onesided is True:
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
else:
sides = 'twosided'
if sides == 'twosided':
num_freqs = nfft
elif sides == 'onesided':
if nfft % 2:
num_freqs = (nfft + 1)//2
else:
num_freqs = nfft//2 + 1
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft)
result = result[..., :num_freqs]
freqs = fftpack.fftfreq(nfft, 1/fs)[:num_freqs]
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft)
result_y = result_y[..., :num_freqs]
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
elif mode == 'magnitude':
result = np.absolute(result)
elif mode == 'angle' or mode == 'phase':
result = np.angle(result)
elif mode == 'complex':
pass
result *= scale
if sides == 'onesided':
if nfft % 2:
result[...,1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[...,1:-1] *= 2
t = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, nperseg - noverlap)/float(fs)
if sides != 'twosided' and not nfft % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=-1)
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'complex':
result = result.real
# Output is going to have new last axis for window index
if axis != -1:
# Specify as positive axis index
if axis < 0:
axis = len(result.shape)-1-axis
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
else:
# Make sure window/time index is last axis
result = np.rollaxis(result, -1, -2)
return freqs, t, result
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft):
'''
Calculate windowed FFT, for internal use by scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
    _spectral_helper. All input validation is performed there, and the data
axis is assumed to be the last axis of x. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Returns
-------
result : ndarray
Array of FFT data
References
----------
stackoverflow: Repeat NumPy array without replicating data?
<http://stackoverflow.com/a/5568169>
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
'''
    # Create a strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
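        # e.g. len(x) == 10, nperseg == 4, noverlap == 2 gives step == 2 and
        # (10 - 2) // 2 == 4 overlapping segments x[0:4], x[2:6], x[4:8],
        # x[6:10], all views into x's memory (no data is copied).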
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
result = fftpack.fft(result, n=nfft)
return result
|
bsd-3-clause
|
janhahne/nest-simulator
|
pynest/examples/pulsepacket.py
|
12
|
11358
|
# -*- coding: utf-8 -*-
#
# pulsepacket.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Pulse packet example
--------------------
This script compares the average and individual membrane potential excursions
in response to a single pulse packet with an analytically acquired voltage
trace (see: Diesmann [1]_)
A pulse packet is a transient spike volley with a Gaussian rate profile.
The user can specify the neural parameters, the parameters of the
pulse-packet and the number of trials.
References
~~~~~~~~~~~~
.. [1] Diesmann M. 2002. Dissertation. Conditions for stable propagation of
synchronous spiking in cortical neural networks: Single neuron dynamics
and network properties.
http://d-nb.info/968772781/34.
"""
###############################################################################
# First, we import all necessary modules for simulation, analysis and
# plotting.
import scipy.special as sp
import nest
import numpy
import matplotlib.pyplot as plt
# Properties of pulse packet:
a = 100 # number of spikes in one pulse packet
sdev = 10. # width of pulse packet (ms)
weight = 0.1 # PSP amplitude (mV)
pulsetime = 500. # occurrence time (center) of pulse-packet (ms)
# Network and neuron characteristics:
n_neurons = 100 # number of neurons
cm = 200. # membrane capacitance (pF)
tau_s = 0.5 # synaptic time constant (ms)
tau_m = 20. # membrane time constant (ms)
V0 = 0.0 # resting potential (mV)
Vth = numpy.inf # firing threshold, high value to avoid spiking
# Simulation and analysis parameters:
simtime = 1000. # how long we simulate (ms)
simulation_resolution = 0.1 # (ms)
sampling_resolution = 1. # for voltmeter (ms)
convolution_resolution = 1. # for the analytics (ms)
# Some parameters in base units.
Cm = cm * 1e-12 # convert to Farad
Weight = weight * 1e-12 # convert to Ampere
Tau_s = tau_s * 1e-3 # convert to sec
Tau_m = tau_m * 1e-3 # convert to sec
Sdev = sdev * 1e-3 # convert to sec
Convolution_resolution = convolution_resolution * 1e-3 # convert to sec
###############################################################################
# This function calculates the membrane potential excursion in response
# to a single input spike (the equation is given for example in Diesmann [1]_,
# eq.2.3).
# It expects:
#
# * ``Time``: a time array or a single time point (in sec)
# * ``Tau_s`` and ``Tau_m``: the synaptic and the membrane time constant (in sec)
# * ``Cm``: the membrane capacity (in Farad)
# * ``Weight``: the synaptic weight (in Ampere)
#
# It returns the provoked membrane potential (in mV)
def make_psp(Time, Tau_s, Tau_m, Cm, Weight):
term1 = (1 / Tau_s - 1 / Tau_m)
term2 = numpy.exp(-Time / Tau_s)
term3 = numpy.exp(-Time / Tau_m)
PSP = (Weight / Cm * numpy.exp(1) / Tau_s *
(((-Time * term2) / term1) + (term3 - term2) / term1 ** 2))
return PSP * 1e3
###############################################################################
# This function finds the exact location of the maximum of the PSP caused by a
# single input spike. The location is obtained by setting the first derivative
# of the equation for the PSP (see ``make_psp()``) to zero. The resulting
# equation can be expressed in terms of a `LambertW function`.
# This function expects:
#
# * ``Tau_s`` and ``Tau_m``: the synaptic and membrane time constant (in sec)
#
# It returns the location of the maximum (in sec)
def LambertWm1(x):
# Using scipy to mimic the gsl_sf_lambert_Wm1 function.
return sp.lambertw(x, k=-1 if x < 0 else 0).real
def find_loc_pspmax(tau_s, tau_m):
var = tau_m / tau_s
lam = LambertWm1(-numpy.exp(-1 / var) / var)
t_maxpsp = (-var * lam - 1) / var / (1 / tau_s - 1 / tau_m) * 1e-3
return t_maxpsp
###############################################################################
# First, we construct a Gaussian kernel for a given standard deviation
# (``sig``) and mean value (``mu``). In this case the standard deviation is
# the width of the pulse packet (see [1]_).
sig = Sdev
mu = 0.0
x = numpy.arange(-4 * sig, 4 * sig, Convolution_resolution)
term1 = 1 / (sig * numpy.sqrt(2 * numpy.pi))
term2 = numpy.exp(-(x - mu) ** 2 / (sig ** 2 * 2))
gauss = term1 * term2 * Convolution_resolution
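# The kernel is multiplied by the bin width so that it sums to (approximately)
# one; the Gaussian describes a rate profile, i.e. a probability density.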
###############################################################################
# Second, we calculate the PSP of a neuron due to a single spiking input.
# (see Diesmann 2002, eq. 2.3).
# Since we do that in discrete time steps, we first construct an array
# (``t_psp``) that contains the time points we want to consider. Then, the
# function ``make_psp()`` (that creates the PSP) takes the time array as its
# first argument.
t_psp = numpy.arange(0, 10 * (Tau_m + Tau_s), Convolution_resolution)
psp = make_psp(t_psp, Tau_s, Tau_m, Cm, Weight)
###############################################################################
# Now, we want to normalize the PSP amplitude to one. We therefore have to
# divide the PSP by its maximum ([1]_ sec 6.1). The function
# ``find_loc_pspmax()`` returns the exact time point (``t_pspmax``) when we
# expect the maximum to occur. The function ``make_psp()`` calculates the
# corresponding PSP value, which is our PSP amplitude (``psp_amp``).
t_pspmax = find_loc_pspmax(Tau_s, Tau_m)
psp_amp = make_psp(t_pspmax, Tau_s, Tau_m, Cm, Weight)
psp_norm = psp / psp_amp
###############################################################################
# Now we have all ingredients to compute the membrane potential excursion
# (`U`). This calculation implies a convolution of the Gaussian with the
# normalized PSP (see [1]_, eq. 6.9). In order to avoid an offset in the
# convolution, we need to add a pad of zeros on the left side of the
# normalized PSP. Later on we want to compare our analytical results with the
# simulation outcome. Therefore we need a time vector (`t_U`) with the correct
# temporal resolution, which places the excursion of the potential at the
# correct time.
psp_norm = numpy.pad(psp_norm, [len(psp_norm) - 1, 1])
U = a * psp_amp * numpy.convolve(gauss, psp_norm)
ulen = len(U)
t_U = (convolution_resolution * numpy.linspace(-ulen / 2., ulen / 2., ulen) +
pulsetime + 1.)
###############################################################################
# In this section we simulate a network of multiple neurons.
# All these neurons receive an individual pulse packet that is drawn from a
# Gaussian distribution.
#
# We reset the Kernel, define the simulation resolution and set the
# verbosity using ``set_verbosity`` to suppress info messages.
nest.ResetKernel()
nest.SetKernelStatus({'resolution': simulation_resolution})
nest.set_verbosity("M_WARNING")
###############################################################################
# Afterwards we create several neurons, the same number of
# pulse-packet-generators and a voltmeter. All these nodes/devices
# have specific properties that are specified in device specific
# dictionaries (here: `neuron_pars` for the neurons, `ppg_pars`
# for the pulse-packet-generators and `vm_pars` for the voltmeter).
neuron_pars = {
'V_th': Vth,
'tau_m': tau_m,
'tau_syn_ex': tau_s,
'C_m': cm,
'E_L': V0,
'V_reset': V0,
'V_m': V0
}
neurons = nest.Create('iaf_psc_alpha', n_neurons, neuron_pars)
ppg_pars = {
'pulse_times': [pulsetime],
'activity': a,
'sdev': sdev
}
ppgs = nest.Create('pulsepacket_generator', n_neurons, ppg_pars)
vm_pars = {'interval': sampling_resolution}
vm = nest.Create('voltmeter', 1, vm_pars)
###############################################################################
# Now, we connect each pulse generator to one neuron via static synapses.
# We want to keep all properties of the static synapse constant except the
# synaptic weight. Therefore we change the weight with the help of the command
# ``SetDefaults``.
# The command ``Connect`` connects all kinds of nodes/devices. Since multiple
# nodes/devices can be connected in different ways e.g., each source connects
# to all targets, each source connects to a subset of targets or each source
# connects to exactly one target, we have to specify the connection. In our
# case we use the ``one_to_one`` connection routine since we connect one pulse
# generator (source) to one neuron (target).
# In addition we also connect the `voltmeter` to the `neurons`.
nest.SetDefaults('static_synapse', {'weight': weight})
nest.Connect(ppgs, neurons, 'one_to_one')
nest.Connect(vm, neurons)
###############################################################################
# In the next step we run the simulation for a given duration in ms.
nest.Simulate(simtime)
###############################################################################
# Finally, we record the membrane potential, when it occurred and to which
# neuron it belongs. The sender and the time point of a voltage
# data point at position x in the voltage array (``V_m``), can be found at the
# same position x in the sender (`senders`) and the time array (`times`).
Vm = vm.get('events', 'V_m')
times = vm.get('events', 'times')
senders = vm.get('events', 'senders')
###############################################################################
# Here we plot the membrane potential derived from the theory and from the
# simulation. Since we simulate multiple neurons that received slightly
# different pulse packets, we plot the individual and the averaged membrane
# potentials.
#
# We plot the analytical solution U (the resting potential V0 shifts the
# membrane potential up or downwards).
plt.plot(t_U, U + V0, 'r', lw=2, zorder=3, label='analytical solution')
###############################################################################
# Then we plot all individual membrane potentials.
# The time axes is the range of the simulation time in steps of ms.
Vm_single = [Vm[senders == n.global_id] for n in neurons]
simtimes = numpy.arange(1, simtime)
for idn in range(n_neurons):
if idn == 0:
plt.plot(simtimes, Vm_single[idn], 'gray',
zorder=1, label='single potentials')
else:
plt.plot(simtimes, Vm_single[idn], 'gray', zorder=1)
###############################################################################
# Finally, we plot the averaged membrane potential.
Vm_average = numpy.mean(Vm_single, axis=0)
plt.plot(simtimes, Vm_average, 'b', lw=4,
zorder=2, label='averaged potential')
plt.legend()
plt.xlabel('time (ms)')
plt.ylabel('membrane potential (mV)')
plt.xlim((-5 * (tau_m + tau_s) + pulsetime,
10 * (tau_m + tau_s) + pulsetime))
plt.show()
|
gpl-2.0
|
alekz112/statsmodels
|
statsmodels/datasets/fair/data.py
|
25
|
3074
|
#! /usr/bin/env python
"""Fair's Extramarital Affairs Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Included with permission of the author."""
TITLE = """Affairs dataset"""
SOURCE = """
Fair, Ray. 1978. "A Theory of Extramarital Affairs," `Journal of Political
Economy`, February, 45-61.
The data is available at http://fairmodel.econ.yale.edu/rayfair/pdf/2011b.htm
"""
DESCRSHORT = """Extramarital affair data."""
DESCRLONG = """Extramarital affair data used to explain the allocation
of an individual's time among work, time spent with a spouse, and time
spent with a paramour. The data is used as an example of regression
with censored data."""
#suggested notes
NOTE = """::
Number of observations: 6366
Number of variables: 9
Variable name definitions:
    rate_marriage : Rating of the marriage, 1 = very poor, 2 = poor, 3 = fair,
4 = good, 5 = very good
age : Age
yrs_married : No. years married. Interval approximations. See
original paper for detailed explanation.
children : No. children
    religious : How religious, 1 = not, 2 = mildly, 3 = fairly,
4 = strongly
educ : Level of education, 9 = grade school, 12 = high
school, 14 = some college, 16 = college graduate,
17 = some graduate school, 20 = advanced degree
occupation : 1 = student, 2 = farming, agriculture; semi-skilled,
                    or unskilled worker; 3 = white-collar; 4 = teacher
counselor social worker, nurse; artist, writers;
technician, skilled worker, 5 = managerial,
administrative, business, 6 = professional with
advanced degree
occupation_husb : Husband's occupation. Same as occupation.
affairs : measure of time spent in extramarital affairs
See the original paper for more details.
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray(data, endog_idx=8, exog_idx=None, dtype=float)
def load_pandas():
data = _get_data()
##### SET THE INDICES #####
#NOTE: None for exog_idx is the complement of endog_idx
return du.process_recarray_pandas(data, endog_idx=8, exog_idx=None,
dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/fair.csv', 'rb'),
delimiter=",", names = True, dtype=float)
return data
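# Hypothetical usage sketch (not part of the original module); it assumes the
# standard statsmodels datasets interface:
#     >>> import statsmodels.api as sm
#     >>> dataset = sm.datasets.fair.load_pandas()
#     >>> dataset.endog.describe()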
|
bsd-3-clause
|
astroML/astroML
|
examples/datasets/plot_moving_objects.py
|
2
|
3723
|
"""
SDSS Moving Object Catalog
--------------------------
This plot demonstrates how to fetch data from the SDSS Moving object catalog,
and plot using a multicolor plot similar to that used in figures 3-4 of [1]_
References
~~~~~~~~~~
.. [1] Parker `et al.` 2008 http://adsabs.harvard.edu/abs/2008Icar..198..138P
"""
# Author: Jake VanderPlas <[email protected]>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from astroML.datasets import fetch_moving_objects
from astroML.plotting.tools import devectorize_axes
def black_bg_subplot(*args, **kwargs):
"""Create a subplot with black background"""
if int(matplotlib.__version__[0]) >= 2:
kwargs['facecolor'] = 'k'
else:
kwargs['axisbg'] = 'k'
ax = plt.subplot(*args, **kwargs)
# set ticks and labels to white
for spine in ax.spines.values():
spine.set_color('w')
for tick in ax.xaxis.get_major_ticks() + ax.yaxis.get_major_ticks():
for child in tick.get_children():
child.set_color('w')
return ax
def compute_color(mag_a, mag_i, mag_z, a_crit=-0.1):
"""
Compute the scatter-plot color using code adapted from
TCL source used in Parker 2008.
"""
# define the base color scalings
R = np.ones_like(mag_i)
G = 0.5 * 10 ** (-2 * (mag_i - mag_z - 0.01))
B = 1.5 * 10 ** (-8 * (mag_a + 0.0))
# enhance green beyond the a_crit cutoff
i = np.where(mag_a < a_crit)
G[i] += 10000 * (10 ** (-0.01 * (mag_a[i] - a_crit)) - 1)
# normalize color of each point to its maximum component
RGB = np.vstack([R, G, B])
RGB /= RGB.max(0)
# return an array of RGB colors, which is shape (n_points, 3)
return RGB.T
#------------------------------------------------------------
# Fetch data and extract the desired quantities
data = fetch_moving_objects(Parker2008_cuts=True)
mag_a = data['mag_a']
mag_i = data['mag_i']
mag_z = data['mag_z']
a = data['aprime']
sini = data['sin_iprime']
# dither: magnitudes are recorded only to +/- 0.01
mag_a += -0.005 + 0.01 * np.random.random(size=mag_a.shape)
mag_i += -0.005 + 0.01 * np.random.random(size=mag_i.shape)
mag_z += -0.005 + 0.01 * np.random.random(size=mag_z.shape)
# compute RGB color based on magnitudes
color = compute_color(mag_a, mag_i, mag_z)
#------------------------------------------------------------
# set up the plot
# plot the color-magnitude plot
fig = plt.figure(facecolor='k')
ax = black_bg_subplot(111)
ax.scatter(mag_a, mag_i - mag_z,
c=color, s=1, lw=0)
devectorize_axes(ax, dpi=400)
ax.plot([0, 0], [-0.8, 0.6], '--w', lw=2)
ax.plot([0, 0.4], [-0.15, -0.15], '--w', lw=2)
ax.set_xlim(-0.3, 0.4)
ax.set_ylim(-0.8, 0.6)
ax.set_xlabel('a*', color='w')
ax.set_ylabel('i-z', color='w')
# plot the orbital parameters plot
fig = plt.figure(facecolor='k')
ax = black_bg_subplot(111)
ax.scatter(a, sini,
c=color, s=1, lw=0)
devectorize_axes(ax, dpi=400)
ax.plot([2.5, 2.5], [-0.02, 0.3], '--w')
ax.plot([2.82, 2.82], [-0.02, 0.3], '--w')
ax.set_xlim(2.0, 3.3)
ax.set_ylim(-0.02, 0.3)
ax.set_xlabel('a (AU)', color='w')
ax.set_ylabel('sin(i)', color='w')
# label the plot
text_kwargs = dict(color='w', fontsize=14,
transform=plt.gca().transAxes,
ha='center', va='bottom')
ax.text(0.25, 1.01, 'Inner', **text_kwargs)
ax.text(0.53, 1.01, 'Mid', **text_kwargs)
ax.text(0.83, 1.01, 'Outer', **text_kwargs)
# Saving the black-background figure requires some extra arguments:
#fig.savefig('moving_objects.png',
# facecolor='black',
# edgecolor='none')
plt.show()
|
bsd-2-clause
|
Scoudem/audiolyze
|
plotable.py
|
1
|
5487
|
'''
File: plotable.py
Author: Tristan van Vaalen
Plotable data stream
'''
import collections
import numpy
import audiolazy
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import verbose
v = verbose.Verbose()
class Plotable:
def __init__(self, filt, **kwargs):
for key, value in kwargs.iteritems():
setattr(self, key, value)
self.filt = filt
self.window = numpy.array(audiolazy.window.hamming(self.length))
self.data = collections.deque([0.] * self.length, maxlen=self.length)
self.seconds, self.hertz = audiolazy.sHz(self.rate)
self.miliseconds = 1e-3 * self.seconds
self.setup_plot()
def append(self, element):
self.data.append(element)
def setup_plot(self, title='Audio stream analysis'):
if self.record:
self.figure = plt.figure(
title,
facecolor='#cccccc'
)
self.time_values = numpy.array(
list(
audiolazy.line(
self.length, -self.length / self.miliseconds, 0
)
)
)
v.debug('Buffer size: {}ms (t={} to t={})'.format(
abs(self.time_values[0]) - self.time_values[-1],
self.time_values[0], self.time_values[-1]
))
self.freq_values = numpy.array(
audiolazy.line(self.length, 0, 2 * audiolazy.pi / self.hertz)
.take(self.length // 2 + 1)
)
v.debug('Frequency range: {}Hz to {}Hz'.format(
self.freq_values[0], self.freq_values[-1]
))
self.dft_max_min, self.dft_max_max = 0.01, 1.0
xlim_t = (self.time_values[0], self.time_values[-1])
ylim_t = (-1., 1.)
xlim_f = (self.freq_values[0], self.freq_values[-1])
ylim_f = (0., .5 * (self.dft_max_max + self.dft_max_min))
self.time_ax, self.time_line = self._subplot_and_line(
1, xlim_t, ylim_t, '#00aaff', 'Time (ms)'
)
self.time_filt_ax, self.time_filt_line = self._subplot_and_line(
2, xlim_t, ylim_t, '#aa00ff', 'Filtered Time (ms)'
)
self.freq_ax, self.freq_line = self._subplot_and_line(
3, xlim_f, ylim_f, '#00aaff', 'Frequency (Hz)'
)
self.freq_filt_ax, self.freq_filt_line = self._subplot_and_line(
4, xlim_f, ylim_f, '#aa00ff', 'Filtered Frequency (Hz)'
)
if self.response:
v.debug('Plotting frequency response')
self.filt.plot()
if self.zplot:
v.debug('Plotting zero-pole plane')
self.filt.zplot()
def update_y_lim(self, ax, ax2, smax):
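# Adaptive y-axis scaling for the spectrum plots: double the upper limit when
# the current peak overshoots it, halve it when the peak falls well below it,
# keeping the limit between dft_max_min and dft_max_max. Returns True if a
# rescale happened.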
top = ax.get_ylim()[1]
if top < self.dft_max_max and abs(smax / top) > 1:
ax.set_ylim(top=top * 2)
ax2.set_ylim(top=top * 2)
return True
elif top > self.dft_max_min and abs(smax / top) < .2:
ax.set_ylim(top=top / 2)
ax2.set_ylim(top=top / 2)
return True
return False
def _subplot_and_line(self, index, xlim, ylim, color, label):
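# Helper: create one subplot of the 2x2 grid with fixed limits, a black
# background and an empty line object that animate() updates later.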
ax = plt.subplot(
2, 2, index,
xlim=xlim,
ylim=ylim,
axisbg='black'
)
ax.set_xlabel(label)
line = ax.plot(
[], [], linewidth=2, color=color
)[0]
return ax, line
def start_animation(self):
if self.record:
v.debug('Starting animation')
v.info('Large window size can seriously slow down rendering')
self.rempty = False
self.anim = FuncAnimation(
self.figure,
self.animate,
init_func=self.init,
interval=10,
blit=True
)
# plt.ioff()
plt.show()
def init(self):
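# FuncAnimation init callback: clear all four lines so blitting starts from an
# empty frame.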
self.time_line.set_data([], [])
self.freq_line.set_data([], [])
self.time_filt_line.set_data([], [])
self.freq_filt_line.set_data([], [])
self.figure.tight_layout()
if self.rempty:
return []
else:
return [
self.time_line,
self.freq_line,
self.time_filt_line,
self.freq_filt_line
]
def animate(self, idx):
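# FuncAnimation update callback: read the current buffer, run it through the
# filter, compute windowed FFT magnitudes for both signals, push the data into
# the four line objects and rescale the spectrum axes when needed.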
if idx == 100:
plt.savefig('test.png')
array_data = numpy.array(self.data)
array_data_filt = self.filt(array_data).take(audiolazy.inf)
spectrum = numpy.abs(numpy.fft.rfft(array_data * self.window)) /\
self.length
spectrum_filt = numpy.abs(numpy.fft.rfft(array_data_filt * self.window)) /\
self.length
self.time_line.set_data(self.time_values, array_data)
self.time_filt_line.set_data(self.time_values, array_data_filt)
self.freq_line.set_data(self.freq_values, spectrum)
self.freq_filt_line.set_data(self.freq_values, spectrum_filt)
smax = spectrum.max()
s1 = self.update_y_lim(self.freq_ax, self.freq_filt_ax, smax)
if not s1:
self.rempty = True
return [
self.time_line,
self.freq_line,
self.time_filt_line,
self.freq_filt_line
]
return []
|
mit
|
sauloal/cnidaria
|
scripts/venv/lib/python2.7/site-packages/pandas/sparse/tests/test_sparse.py
|
1
|
67074
|
# pylint: disable-msg=E1101,W0612
import operator
from datetime import datetime
import functools
import nose
from numpy import nan
import numpy as np
import pandas as pd
dec = np.testing.dec
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal, assertRaisesRegexp, assert_array_equal)
from numpy.testing import assert_equal
from pandas import Series, DataFrame, bdate_range, Panel, MultiIndex
from pandas.core.datetools import BDay
from pandas.core.index import Index
from pandas.tseries.index import DatetimeIndex
import pandas.core.datetools as datetools
from pandas.core.common import isnull
import pandas.util.testing as tm
from pandas.compat import range, lrange, StringIO
from pandas import compat
from pandas.tools.util import cartesian_product
import pandas.sparse.frame as spf
from pandas._sparse import BlockIndex, IntIndex
from pandas.sparse.api import (SparseSeries, SparseTimeSeries,
SparseDataFrame, SparsePanel,
SparseArray)
import pandas.tests.test_frame as test_frame
import pandas.tests.test_panel as test_panel
import pandas.tests.test_series as test_series
from pandas.sparse.tests.test_array import assert_sp_array_equal
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
def _test_data1():
# nan-based
arr = np.arange(20, dtype=float)
index = np.arange(20)
arr[:2] = nan
arr[5:10] = nan
arr[-3:] = nan
return arr, index
def _test_data2():
# nan-based
arr = np.arange(15, dtype=float)
index = np.arange(15)
arr[7:12] = nan
arr[-1:] = nan
return arr, index
def _test_data1_zero():
# zero-based
arr, index = _test_data1()
arr[np.isnan(arr)] = 0
return arr, index
def _test_data2_zero():
# zero-based
arr, index = _test_data2()
arr[np.isnan(arr)] = 0
return arr, index
def assert_sp_series_equal(a, b, exact_indices=True):
assert(a.index.equals(b.index))
assert_sp_array_equal(a, b)
def assert_sp_frame_equal(left, right, exact_indices=True):
"""
exact_indices: Series SparseIndex objects must be exactly the same, otherwise just
compare dense representations
"""
for col, series in compat.iteritems(left):
assert(col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(series, right[col])
else:
assert_series_equal(series.to_dense(), right[col].to_dense())
assert_almost_equal(left.default_fill_value,
right.default_fill_value)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert(col in left)
def assert_sp_panel_equal(left, right, exact_indices=True):
for item, frame in compat.iteritems(left):
assert(item in right)
# trade-off?
assert_sp_frame_equal(frame, right[item], exact_indices=exact_indices)
assert_almost_equal(left.default_fill_value,
right.default_fill_value)
assert(left.default_kind == right.default_kind)
for item in right:
assert(item in left)
class TestSparseSeries(tm.TestCase,
test_series.CheckNameIntegration):
_multiprocess_can_split_ = True
def setUp(self):
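# Fixtures: block- and integer-kind SparseSeries built from the nan-based and
# zero-based (fill_value=0) test data defined above.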
arr, index = _test_data1()
date_index = bdate_range('1/1/2011', periods=len(index))
self.bseries = SparseSeries(arr, index=index, kind='block')
self.bseries.name = 'bseries'
self.ts = self.bseries
self.btseries = SparseSeries(arr, index=date_index, kind='block')
self.iseries = SparseSeries(arr, index=index, kind='integer')
arr, index = _test_data2()
self.bseries2 = SparseSeries(arr, index=index, kind='block')
self.iseries2 = SparseSeries(arr, index=index, kind='integer')
arr, index = _test_data1_zero()
self.zbseries = SparseSeries(arr, index=index, kind='block',
fill_value=0)
self.ziseries = SparseSeries(arr, index=index, kind='integer',
fill_value=0)
arr, index = _test_data2_zero()
self.zbseries2 = SparseSeries(arr, index=index, kind='block',
fill_value=0)
self.ziseries2 = SparseSeries(arr, index=index, kind='integer',
fill_value=0)
def test_iteration_and_str(self):
[x for x in self.bseries]
str(self.bseries)
def test_construct_DataFrame_with_sp_series(self):
# it works!
df = DataFrame({'col': self.bseries})
# printing & access
df.iloc[:1]
df['col']
df.dtypes
str(df)
assert_sp_series_equal(df['col'], self.bseries)
result = df.iloc[:, 0]
assert_sp_series_equal(result, self.bseries)
# blocking
expected = Series({'col': 'float64:sparse'})
result = df.ftypes
assert_series_equal(expected, result)
def test_series_density(self):
# GH2803
ts = Series(np.random.randn(10))
ts[2:-2] = nan
sts = ts.to_sparse()
density = sts.density # don't die
self.assertEqual(density, 4 / 10.0)
def test_sparse_to_dense(self):
arr, index = _test_data1()
series = self.bseries.to_dense()
assert_equal(series, arr)
series = self.bseries.to_dense(sparse_only=True)
assert_equal(series, arr[np.isfinite(arr)])
series = self.iseries.to_dense()
assert_equal(series, arr)
arr, index = _test_data1_zero()
series = self.zbseries.to_dense()
assert_equal(series, arr)
series = self.ziseries.to_dense()
assert_equal(series, arr)
def test_dense_to_sparse(self):
series = self.bseries.to_dense()
bseries = series.to_sparse(kind='block')
iseries = series.to_sparse(kind='integer')
assert_sp_series_equal(bseries, self.bseries)
assert_sp_series_equal(iseries, self.iseries)
# non-NaN fill value
series = self.zbseries.to_dense()
zbseries = series.to_sparse(kind='block', fill_value=0)
ziseries = series.to_sparse(kind='integer', fill_value=0)
assert_sp_series_equal(zbseries, self.zbseries)
assert_sp_series_equal(ziseries, self.ziseries)
def test_to_dense_preserve_name(self):
assert(self.bseries.name is not None)
result = self.bseries.to_dense()
self.assertEqual(result.name, self.bseries.name)
def test_constructor(self):
# test setup guys
self.assertTrue(np.isnan(self.bseries.fill_value))
tm.assert_isinstance(self.bseries.sp_index, BlockIndex)
self.assertTrue(np.isnan(self.iseries.fill_value))
tm.assert_isinstance(self.iseries.sp_index, IntIndex)
self.assertEqual(self.zbseries.fill_value, 0)
assert_equal(self.zbseries.values.values,
self.bseries.to_dense().fillna(0).values)
# pass SparseSeries
s2 = SparseSeries(self.bseries)
s3 = SparseSeries(self.iseries)
s4 = SparseSeries(self.zbseries)
assert_sp_series_equal(s2, self.bseries)
assert_sp_series_equal(s3, self.iseries)
assert_sp_series_equal(s4, self.zbseries)
# Sparse time series works
date_index = bdate_range('1/1/2000', periods=len(self.bseries))
s5 = SparseSeries(self.bseries, index=date_index)
tm.assert_isinstance(s5, SparseTimeSeries)
# pass Series
bseries2 = SparseSeries(self.bseries.to_dense())
assert_equal(self.bseries.sp_values, bseries2.sp_values)
# pass dict?
# don't copy the data by default
values = np.ones(self.bseries.npoints)
sp = SparseSeries(values, sparse_index=self.bseries.sp_index)
sp.sp_values[:5] = 97
self.assertEqual(values[0], 97)
# but can make it copy!
sp = SparseSeries(values, sparse_index=self.bseries.sp_index,
copy=True)
sp.sp_values[:5] = 100
self.assertEqual(values[0], 97)
def test_constructor_scalar(self):
data = 5
sp = SparseSeries(data, np.arange(100))
sp = sp.reindex(np.arange(200))
self.assertTrue((sp.ix[:99] == data).all())
self.assertTrue(isnull(sp.ix[100:]).all())
data = np.nan
sp = SparseSeries(data, np.arange(100))
def test_constructor_ndarray(self):
pass
def test_constructor_nonnan(self):
arr = [0, 0, 0, nan, nan]
sp_series = SparseSeries(arr, fill_value=0)
assert_equal(sp_series.values.values, arr)
# GH 9272
def test_constructor_empty(self):
sp = SparseSeries()
self.assertEqual(len(sp.index), 0)
def test_copy_astype(self):
cop = self.bseries.astype(np.float64)
self.assertIsNot(cop, self.bseries)
self.assertIs(cop.sp_index, self.bseries.sp_index)
self.assertEqual(cop.dtype, np.float64)
cop2 = self.iseries.copy()
assert_sp_series_equal(cop, self.bseries)
assert_sp_series_equal(cop2, self.iseries)
# test that data is copied
cop[:5] = 97
self.assertEqual(cop.sp_values[0], 97)
self.assertNotEqual(self.bseries.sp_values[0], 97)
# correct fill value
zbcop = self.zbseries.copy()
zicop = self.ziseries.copy()
assert_sp_series_equal(zbcop, self.zbseries)
assert_sp_series_equal(zicop, self.ziseries)
# no deep copy
view = self.bseries.copy(deep=False)
view.sp_values[:5] = 5
self.assertTrue((self.bseries.sp_values[:5] == 5).all())
def test_astype(self):
self.assertRaises(Exception, self.bseries.astype, np.int64)
def test_kind(self):
self.assertEqual(self.bseries.kind, 'block')
self.assertEqual(self.iseries.kind, 'integer')
def test_pickle(self):
def _test_roundtrip(series):
unpickled = self.round_trip_pickle(series)
assert_sp_series_equal(series, unpickled)
assert_series_equal(series.to_dense(), unpickled.to_dense())
self._check_all(_test_roundtrip)
def _check_all(self, check_func):
check_func(self.bseries)
check_func(self.iseries)
check_func(self.zbseries)
check_func(self.ziseries)
def test_getitem(self):
def _check_getitem(sp, dense):
for idx, val in compat.iteritems(dense):
assert_almost_equal(val, sp[idx])
for i in range(len(dense)):
assert_almost_equal(sp[i], dense[i])
# j = np.float64(i)
# assert_almost_equal(sp[j], dense[j])
# API change 1/6/2012
# negative getitem works
# for i in xrange(len(dense)):
# assert_almost_equal(sp[-i], dense[-i])
_check_getitem(self.bseries, self.bseries.to_dense())
_check_getitem(self.btseries, self.btseries.to_dense())
_check_getitem(self.zbseries, self.zbseries.to_dense())
_check_getitem(self.iseries, self.iseries.to_dense())
_check_getitem(self.ziseries, self.ziseries.to_dense())
# exception handling
self.assertRaises(Exception, self.bseries.__getitem__,
len(self.bseries) + 1)
# index not contained
self.assertRaises(Exception, self.btseries.__getitem__,
self.btseries.index[-1] + BDay())
def test_get_get_value(self):
assert_almost_equal(self.bseries.get(10), self.bseries[10])
self.assertIsNone(self.bseries.get(len(self.bseries) + 1))
dt = self.btseries.index[10]
result = self.btseries.get(dt)
expected = self.btseries.to_dense()[dt]
assert_almost_equal(result, expected)
assert_almost_equal(self.bseries.get_value(10), self.bseries[10])
def test_set_value(self):
idx = self.btseries.index[7]
self.btseries.set_value(idx, 0)
self.assertEqual(self.btseries[idx], 0)
self.iseries.set_value('foobar', 0)
self.assertEqual(self.iseries.index[-1], 'foobar')
self.assertEqual(self.iseries['foobar'], 0)
def test_getitem_slice(self):
idx = self.bseries.index
res = self.bseries[::2]
tm.assert_isinstance(res, SparseSeries)
expected = self.bseries.reindex(idx[::2])
assert_sp_series_equal(res, expected)
res = self.bseries[:5]
tm.assert_isinstance(res, SparseSeries)
assert_sp_series_equal(res, self.bseries.reindex(idx[:5]))
res = self.bseries[5:]
assert_sp_series_equal(res, self.bseries.reindex(idx[5:]))
# negative indices
res = self.bseries[:-3]
assert_sp_series_equal(res, self.bseries.reindex(idx[:-3]))
def test_take(self):
def _compare_with_dense(sp):
dense = sp.to_dense()
def _compare(idx):
dense_result = dense.take(idx).values
sparse_result = sp.take(idx)
self.assertIsInstance(sparse_result, SparseSeries)
assert_almost_equal(dense_result, sparse_result.values.values)
_compare([1., 2., 3., 4., 5., 0.])
_compare([7, 2, 9, 0, 4])
_compare([3, 6, 3, 4, 7])
self._check_all(_compare_with_dense)
self.assertRaises(Exception, self.bseries.take,
[0, len(self.bseries) + 1])
# Corner case
sp = SparseSeries(np.ones(10) * nan)
assert_almost_equal(sp.take([0, 1, 2, 3, 4]), np.repeat(nan, 5))
def test_setitem(self):
self.bseries[5] = 7.
self.assertEqual(self.bseries[5], 7.)
def test_setslice(self):
self.bseries[5:10] = 7.
assert_series_equal(self.bseries[5:10].to_dense(), Series(
7., index=range(5, 10), name=self.bseries.name))
def test_operators(self):
def _check_op(a, b, op):
sp_result = op(a, b)
adense = a.to_dense() if isinstance(a, SparseSeries) else a
bdense = b.to_dense() if isinstance(b, SparseSeries) else b
dense_result = op(adense, bdense)
assert_almost_equal(sp_result.to_dense(), dense_result)
def check(a, b):
_check_op(a, b, operator.add)
_check_op(a, b, operator.sub)
_check_op(a, b, operator.truediv)
_check_op(a, b, operator.floordiv)
_check_op(a, b, operator.mul)
_check_op(a, b, lambda x, y: operator.add(y, x))
_check_op(a, b, lambda x, y: operator.sub(y, x))
_check_op(a, b, lambda x, y: operator.truediv(y, x))
_check_op(a, b, lambda x, y: operator.floordiv(y, x))
_check_op(a, b, lambda x, y: operator.mul(y, x))
# NaN ** 0 = 1 in C?
# _check_op(a, b, operator.pow)
# _check_op(a, b, lambda x, y: operator.pow(y, x))
check(self.bseries, self.bseries)
check(self.iseries, self.iseries)
check(self.bseries, self.iseries)
check(self.bseries, self.bseries2)
check(self.bseries, self.iseries2)
check(self.iseries, self.iseries2)
# scalar value
check(self.bseries, 5)
# zero-based
check(self.zbseries, self.zbseries * 2)
check(self.zbseries, self.zbseries2)
check(self.ziseries, self.ziseries2)
# with dense
result = self.bseries + self.bseries.to_dense()
assert_sp_series_equal(result, self.bseries + self.bseries)
# @dec.knownfailureif(True, 'Known NumPy failer as of 1.5.1')
def test_operators_corner2(self):
raise nose.SkipTest('known failure on numpy 1.5.1')
# NumPy circumvents __r*__ operations
val = np.float64(3.0)
result = val - self.zbseries
assert_sp_series_equal(result, 3 - self.zbseries)
def test_binary_operators(self):
# skipping for now #####
raise nose.SkipTest("skipping sparse binary operators test")
def _check_inplace_op(iop, op):
tmp = self.bseries.copy()
expected = op(tmp, self.bseries)
iop(tmp, self.bseries)
assert_sp_series_equal(tmp, expected)
inplace_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow']
for op in inplace_ops:
_check_inplace_op(
getattr(operator, "i%s" % op), getattr(operator, op))
def test_reindex(self):
def _compare_with_series(sps, new_index):
spsre = sps.reindex(new_index)
series = sps.to_dense()
seriesre = series.reindex(new_index)
seriesre = seriesre.to_sparse(fill_value=sps.fill_value)
assert_sp_series_equal(spsre, seriesre)
assert_series_equal(spsre.to_dense(), seriesre.to_dense())
_compare_with_series(self.bseries, self.bseries.index[::2])
_compare_with_series(self.bseries, list(self.bseries.index[::2]))
_compare_with_series(self.bseries, self.bseries.index[:10])
_compare_with_series(self.bseries, self.bseries.index[5:])
_compare_with_series(self.zbseries, self.zbseries.index[::2])
_compare_with_series(self.zbseries, self.zbseries.index[:10])
_compare_with_series(self.zbseries, self.zbseries.index[5:])
# special cases
same_index = self.bseries.reindex(self.bseries.index)
assert_sp_series_equal(self.bseries, same_index)
self.assertIsNot(same_index, self.bseries)
# corner cases
sp = SparseSeries([], index=[])
sp_zero = SparseSeries([], index=[], fill_value=0)
_compare_with_series(sp, np.arange(10))
# with copy=False
reindexed = self.bseries.reindex(self.bseries.index, copy=True)
reindexed.sp_values[:] = 1.
self.assertTrue((self.bseries.sp_values != 1.).all())
reindexed = self.bseries.reindex(self.bseries.index, copy=False)
reindexed.sp_values[:] = 1.
np.testing.assert_array_equal(self.bseries.sp_values, 1.)
def test_sparse_reindex(self):
length = 10
def _check(values, index1, index2, fill_value):
first_series = SparseSeries(values, sparse_index=index1,
fill_value=fill_value)
reindexed = first_series.sparse_reindex(index2)
self.assertIs(reindexed.sp_index, index2)
int_indices1 = index1.to_int_index().indices
int_indices2 = index2.to_int_index().indices
expected = Series(values, index=int_indices1)
expected = expected.reindex(int_indices2).fillna(fill_value)
assert_almost_equal(expected.values, reindexed.sp_values)
# make sure level argument asserts
expected = expected.reindex(int_indices2).fillna(fill_value)
def _check_with_fill_value(values, first, second, fill_value=nan):
i_index1 = IntIndex(length, first)
i_index2 = IntIndex(length, second)
b_index1 = i_index1.to_block_index()
b_index2 = i_index2.to_block_index()
_check(values, i_index1, i_index2, fill_value)
_check(values, b_index1, b_index2, fill_value)
def _check_all(values, first, second):
_check_with_fill_value(values, first, second, fill_value=nan)
_check_with_fill_value(values, first, second, fill_value=0)
index1 = [2, 4, 5, 6, 8, 9]
values1 = np.arange(6.)
_check_all(values1, index1, [2, 4, 5])
_check_all(values1, index1, [2, 3, 4, 5, 6, 7, 8, 9])
_check_all(values1, index1, [0, 1])
_check_all(values1, index1, [0, 1, 7, 8, 9])
_check_all(values1, index1, [])
first_series = SparseSeries(values1, sparse_index=IntIndex(length,
index1),
fill_value=nan)
with tm.assertRaisesRegexp(TypeError,
'new index must be a SparseIndex'):
reindexed = first_series.sparse_reindex(0)
def test_repr(self):
bsrepr = repr(self.bseries)
isrepr = repr(self.iseries)
def test_iter(self):
pass
def test_truncate(self):
pass
def test_fillna(self):
pass
def test_groupby(self):
pass
def test_reductions(self):
def _compare_with_dense(obj, op):
sparse_result = getattr(obj, op)()
series = obj.to_dense()
dense_result = getattr(series, op)()
self.assertEqual(sparse_result, dense_result)
to_compare = ['count', 'sum', 'mean', 'std', 'var', 'skew']
def _compare_all(obj):
for op in to_compare:
_compare_with_dense(obj, op)
_compare_all(self.bseries)
self.bseries.sp_values[5:10] = np.NaN
_compare_all(self.bseries)
_compare_all(self.zbseries)
self.zbseries.sp_values[5:10] = np.NaN
_compare_all(self.zbseries)
series = self.zbseries.copy()
series.fill_value = 2
_compare_all(series)
nonna = Series(np.random.randn(20)).to_sparse()
_compare_all(nonna)
nonna2 = Series(np.random.randn(20)).to_sparse(fill_value=0)
_compare_all(nonna2)
def test_dropna(self):
sp = SparseSeries([0, 0, 0, nan, nan, 5, 6],
fill_value=0)
sp_valid = sp.valid()
expected = sp.to_dense().valid()
expected = expected[expected != 0]
assert_almost_equal(sp_valid.values, expected.values)
self.assertTrue(sp_valid.index.equals(expected.index))
self.assertEqual(len(sp_valid.sp_values), 2)
result = self.bseries.dropna()
expected = self.bseries.to_dense().dropna()
self.assertNotIsInstance(result, SparseSeries)
tm.assert_series_equal(result, expected)
def test_homogenize(self):
def _check_matches(indices, expected):
data = {}
for i, idx in enumerate(indices):
data[i] = SparseSeries(idx.to_int_index().indices,
sparse_index=idx)
homogenized = spf.homogenize(data)
for k, v in compat.iteritems(homogenized):
assert(v.sp_index.equals(expected))
indices1 = [BlockIndex(10, [2], [7]),
BlockIndex(10, [1, 6], [3, 4]),
BlockIndex(10, [0], [10])]
expected1 = BlockIndex(10, [2, 6], [2, 3])
_check_matches(indices1, expected1)
indices2 = [BlockIndex(10, [2], [7]),
BlockIndex(10, [2], [7])]
expected2 = indices2[0]
_check_matches(indices2, expected2)
# must have NaN fill value
data = {'a': SparseSeries(np.arange(7), sparse_index=expected2,
fill_value=0)}
assertRaisesRegexp(TypeError, "NaN fill value", spf.homogenize, data)
def test_fill_value_corner(self):
cop = self.zbseries.copy()
cop.fill_value = 0
result = self.bseries / cop
self.assertTrue(np.isnan(result.fill_value))
cop2 = self.zbseries.copy()
cop2.fill_value = 1
result = cop2 / cop
self.assertTrue(np.isnan(result.fill_value))
def test_shift(self):
series = SparseSeries([nan, 1., 2., 3., nan, nan],
index=np.arange(6))
shifted = series.shift(0)
self.assertIsNot(shifted, series)
assert_sp_series_equal(shifted, series)
f = lambda s: s.shift(1)
_dense_series_compare(series, f)
f = lambda s: s.shift(-2)
_dense_series_compare(series, f)
series = SparseSeries([nan, 1., 2., 3., nan, nan],
index=bdate_range('1/1/2000', periods=6))
f = lambda s: s.shift(2, freq='B')
_dense_series_compare(series, f)
f = lambda s: s.shift(2, freq=datetools.bday)
_dense_series_compare(series, f)
def test_cumsum(self):
result = self.bseries.cumsum()
expected = self.bseries.to_dense().cumsum()
tm.assert_isinstance(result, SparseSeries)
self.assertEqual(result.name, self.bseries.name)
assert_series_equal(result.to_dense(), expected)
result = self.zbseries.cumsum()
expected = self.zbseries.to_dense().cumsum()
tm.assert_isinstance(result, Series)
assert_series_equal(result, expected)
def test_combine_first(self):
s = self.bseries
result = s[::2].combine_first(s)
result2 = s[::2].combine_first(s.to_dense())
expected = s[::2].to_dense().combine_first(s.to_dense())
expected = expected.to_sparse(fill_value=s.fill_value)
assert_sp_series_equal(result, result2)
assert_sp_series_equal(result, expected)
class TestSparseSeriesScipyInteraction(tm.TestCase):
# Issue 8048: add SparseSeries coo methods
def setUp(self):
tm._skip_if_no_scipy()
import scipy.sparse
# SparseSeries inputs used in tests; the tests rely on the order
self.sparse_series = []
s = pd.Series([3.0, nan, 1.0, 2.0, nan, nan])
s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
(1, 2, 'a', 1),
(1, 1, 'b', 0),
(1, 1, 'b', 1),
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
self.sparse_series.append(s.to_sparse())
ss = self.sparse_series[0].copy()
ss.index.names = [3, 0, 1, 2]
self.sparse_series.append(ss)
ss = pd.Series(
[nan] * 12, index=cartesian_product((range(3), range(4)))).to_sparse()
for k, v in zip([(0, 0), (1, 2), (1, 3)], [3.0, 1.0, 2.0]):
ss[k] = v
self.sparse_series.append(ss)
# results used in tests
self.coo_matrices = []
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([0, 1, 1], [0, 2, 3])), shape=(3, 4)))
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)))
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([0, 1, 1], [0, 0, 1])), shape=(3, 2)))
self.ils = [[(1, 2), (1, 1), (2, 1)], [(1, 1), (1, 2), (2, 1)], [(1, 2, 'a'), (1, 1, 'b'), (2, 1, 'b')]]
self.jls = [[('a', 0), ('a', 1), ('b', 0), ('b', 1)], [0, 1]]
def test_to_coo_text_names_integer_row_levels_nosort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': [0, 1], 'column_levels': [2, 3]}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_integer_row_levels_sort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': [0, 1],
'column_levels': [2, 3], 'sort_labels': True}
result = (self.coo_matrices[1], self.ils[1], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_text_row_levels_nosort_col_level_single(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': ['A', 'B', 'C'],
'column_levels': ['D'], 'sort_labels': False}
result = (self.coo_matrices[2], self.ils[2], self.jls[1])
self._run_test(ss, kwargs, result)
def test_to_coo_integer_names_integer_row_levels_nosort(self):
ss = self.sparse_series[1]
kwargs = {'row_levels': [3, 0], 'column_levels': [1, 2]}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_text_row_levels_nosort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': ['A', 'B'], 'column_levels': ['C', 'D']}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_bad_partition_nonnull_intersection(self):
ss = self.sparse_series[0]
self.assertRaises(ValueError, ss.to_coo, ['A', 'B', 'C'], ['C', 'D'])
def test_to_coo_bad_partition_small_union(self):
ss = self.sparse_series[0]
self.assertRaises(ValueError, ss.to_coo, ['A'], ['C', 'D'])
def test_to_coo_nlevels_less_than_two(self):
ss = self.sparse_series[0]
ss.index = np.arange(len(ss.index))
self.assertRaises(ValueError, ss.to_coo)
def test_to_coo_bad_ilevel(self):
ss = self.sparse_series[0]
self.assertRaises(KeyError, ss.to_coo, ['A', 'B'], ['C', 'D', 'E'])
def test_to_coo_duplicate_index_entries(self):
ss = pd.concat(
[self.sparse_series[0], self.sparse_series[0]]).to_sparse()
self.assertRaises(ValueError, ss.to_coo, ['A', 'B'], ['C', 'D'])
def test_from_coo_dense_index(self):
ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=True)
check = self.sparse_series[2]
assert_sp_series_equal(ss, check)
def test_from_coo_nodense_index(self):
ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=False)
check = self.sparse_series[2]
check = check.dropna().to_sparse()
assert_sp_series_equal(ss, check)
def _run_test(self, ss, kwargs, check):
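# Shared helper: call to_coo with the given arguments and compare the sparse
# matrix and row/column labels against the expected triple; then repeat with
# row_levels and column_levels swapped and the expected matrix transposed.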
results = ss.to_coo(**kwargs)
self._check_results_to_coo(results, check)
# for every test, also test symmetry property (transpose), switch
# row_levels and column_levels
d = kwargs.copy()
d['row_levels'] = kwargs['column_levels']
d['column_levels'] = kwargs['row_levels']
results = ss.to_coo(**d)
results = (results[0].T, results[2], results[1])
self._check_results_to_coo(results, check)
@staticmethod
def _check_results_to_coo(results, check):
(A, il, jl) = results
(A_result, il_result, jl_result) = check
# convert to dense and compare
assert_array_equal(A.todense(), A_result.todense())
# or compare directly as difference of sparse
# assert(abs(A - A_result).max() < 1e-12) # max is failing in python
# 2.6
assert_equal(il, il_result)
assert_equal(jl, jl_result)
class TestSparseTimeSeries(tm.TestCase):
pass
class TestSparseDataFrame(tm.TestCase, test_frame.SafeForSparse):
klass = SparseDataFrame
_multiprocess_can_split_ = True
def setUp(self):
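# Fixtures: the same data as a NaN-filled block-kind frame, an integer-kind
# frame, a zero-filled frame (default_fill_value=0) and a frame filled with 2.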
self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
self.dates = bdate_range('1/1/2011', periods=10)
self.frame = SparseDataFrame(self.data, index=self.dates)
self.iframe = SparseDataFrame(self.data, index=self.dates,
default_kind='integer')
values = self.frame.values.copy()
values[np.isnan(values)] = 0
self.zframe = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=0,
index=self.dates)
values = self.frame.values.copy()
values[np.isnan(values)] = 2
self.fill_frame = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=2,
index=self.dates)
self.empty = SparseDataFrame()
def test_as_matrix(self):
empty = self.empty.as_matrix()
self.assertEqual(empty.shape, (0, 0))
no_cols = SparseDataFrame(index=np.arange(10))
mat = no_cols.as_matrix()
self.assertEqual(mat.shape, (10, 0))
no_index = SparseDataFrame(columns=np.arange(10))
mat = no_index.as_matrix()
self.assertEqual(mat.shape, (0, 10))
def test_copy(self):
cp = self.frame.copy()
tm.assert_isinstance(cp, SparseDataFrame)
assert_sp_frame_equal(cp, self.frame)
# as of v0.15.0
# this is now identical (but not is_a )
self.assertTrue(cp.index.identical(self.frame.index))
def test_constructor(self):
for col, series in compat.iteritems(self.frame):
tm.assert_isinstance(series, SparseSeries)
tm.assert_isinstance(self.iframe['A'].sp_index, IntIndex)
# constructed zframe from matrix above
self.assertEqual(self.zframe['A'].fill_value, 0)
assert_almost_equal([0, 0, 0, 0, 1, 2, 3, 4, 5, 6],
self.zframe['A'].values)
# construct no data
sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10))
for col, series in compat.iteritems(sdf):
tm.assert_isinstance(series, SparseSeries)
# construct from nested dict
data = {}
for c, s in compat.iteritems(self.frame):
data[c] = s.to_dict()
sdf = SparseDataFrame(data)
assert_sp_frame_equal(sdf, self.frame)
# TODO: test data is copied from inputs
# init dict with different index
idx = self.frame.index[:5]
cons = SparseDataFrame(self.frame, index=idx,
columns=self.frame.columns,
default_fill_value=self.frame.default_fill_value,
default_kind=self.frame.default_kind,
copy=True)
reindexed = self.frame.reindex(idx)
assert_sp_frame_equal(cons, reindexed, exact_indices=False)
# assert level parameter breaks reindex
self.assertRaises(TypeError, self.frame.reindex, idx, level=0)
repr(self.frame)
def test_constructor_ndarray(self):
# no index or columns
sp = SparseDataFrame(self.frame.values)
# 1d
sp = SparseDataFrame(self.data['A'], index=self.dates,
columns=['A'])
assert_sp_frame_equal(sp, self.frame.reindex(columns=['A']))
# raise on level argument
self.assertRaises(TypeError, self.frame.reindex, columns=['A'],
level=1)
# wrong length index / columns
assertRaisesRegexp(
ValueError, "^Index length", SparseDataFrame, self.frame.values,
index=self.frame.index[:-1])
assertRaisesRegexp(
ValueError, "^Column length", SparseDataFrame, self.frame.values,
columns=self.frame.columns[:-1])
# GH 9272
def test_constructor_empty(self):
sp = SparseDataFrame()
self.assertEqual(len(sp.index), 0)
self.assertEqual(len(sp.columns), 0)
def test_constructor_dataframe(self):
dense = self.frame.to_dense()
sp = SparseDataFrame(dense)
assert_sp_frame_equal(sp, self.frame)
def test_constructor_convert_index_once(self):
arr = np.array([1.5, 2.5, 3.5])
sdf = SparseDataFrame(columns=lrange(4), index=arr)
self.assertTrue(sdf[0].index is sdf[1].index)
def test_constructor_from_series(self):
# GH 2873
x = Series(np.random.randn(10000), name='a')
x = x.to_sparse(fill_value=0)
tm.assert_isinstance(x, SparseSeries)
df = SparseDataFrame(x)
tm.assert_isinstance(df, SparseDataFrame)
x = Series(np.random.randn(10000), name='a')
y = Series(np.random.randn(10000), name='b')
x2 = x.astype(float)
x2.ix[:9998] = np.NaN
x_sparse = x2.to_sparse(fill_value=np.NaN)
# Currently fails too with weird ufunc error
# df1 = SparseDataFrame([x_sparse, y])
y.ix[:9998] = 0
y_sparse = y.to_sparse(fill_value=0)
# without sparse value raises error
# df2 = SparseDataFrame([x2_sparse, y])
def test_dtypes(self):
df = DataFrame(np.random.randn(10000, 4))
df.ix[:9998] = np.nan
sdf = df.to_sparse()
result = sdf.get_dtype_counts()
expected = Series({'float64': 4})
assert_series_equal(result, expected)
def test_str(self):
df = DataFrame(np.random.randn(10000, 4))
df.ix[:9998] = np.nan
sdf = df.to_sparse()
str(sdf)
def test_array_interface(self):
res = np.sqrt(self.frame)
dres = np.sqrt(self.frame.to_dense())
assert_frame_equal(res.to_dense(), dres)
def test_pickle(self):
def _test_roundtrip(frame):
result = self.round_trip_pickle(frame)
assert_sp_frame_equal(frame, result)
_test_roundtrip(SparseDataFrame())
self._check_all(_test_roundtrip)
def test_dense_to_sparse(self):
df = DataFrame({'A': [nan, nan, nan, 1, 2],
'B': [1, 2, nan, nan, nan]})
sdf = df.to_sparse()
tm.assert_isinstance(sdf, SparseDataFrame)
self.assertTrue(np.isnan(sdf.default_fill_value))
tm.assert_isinstance(sdf['A'].sp_index, BlockIndex)
tm.assert_frame_equal(sdf.to_dense(), df)
sdf = df.to_sparse(kind='integer')
tm.assert_isinstance(sdf['A'].sp_index, IntIndex)
df = DataFrame({'A': [0, 0, 0, 1, 2],
'B': [1, 2, 0, 0, 0]}, dtype=float)
sdf = df.to_sparse(fill_value=0)
self.assertEqual(sdf.default_fill_value, 0)
tm.assert_frame_equal(sdf.to_dense(), df)
def test_density(self):
df = SparseSeries([nan, nan, nan, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(df.density, 0.7)
def test_sparse_to_dense(self):
pass
def test_sparse_series_ops(self):
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
self._check_frame_ops(self.frame)
finally:
sys.stderr = tmp
def test_sparse_series_ops_i(self):
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
self._check_frame_ops(self.iframe)
finally:
sys.stderr = tmp
def test_sparse_series_ops_z(self):
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
self._check_frame_ops(self.zframe)
finally:
sys.stderr = tmp
def test_sparse_series_ops_fill(self):
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
self._check_frame_ops(self.fill_frame)
finally:
sys.stderr = tmp
def _check_frame_ops(self, frame):
fill = frame.default_fill_value
def _compare_to_dense(a, b, da, db, op):
sparse_result = op(a, b)
dense_result = op(da, db)
dense_result = dense_result.to_sparse(fill_value=fill)
assert_sp_frame_equal(sparse_result, dense_result,
exact_indices=False)
if isinstance(a, DataFrame) and isinstance(db, DataFrame):
mixed_result = op(a, db)
tm.assert_isinstance(mixed_result, SparseDataFrame)
assert_sp_frame_equal(mixed_result, sparse_result,
exact_indices=False)
opnames = ['add', 'sub', 'mul', 'truediv', 'floordiv']
ops = [getattr(operator, name) for name in opnames]
fidx = frame.index
# time series operations
series = [frame['A'], frame['B'],
frame['C'], frame['D'],
frame['A'].reindex(fidx[:7]),
frame['A'].reindex(fidx[::2]),
SparseSeries([], index=[])]
for op in ops:
_compare_to_dense(frame, frame[::2], frame.to_dense(),
frame[::2].to_dense(), op)
for i, s in enumerate(series):
_compare_to_dense(frame, s, frame.to_dense(),
s.to_dense(), op)
_compare_to_dense(s, frame, s.to_dense(),
frame.to_dense(), op)
# cross-sectional operations
series = [frame.xs(fidx[0]),
frame.xs(fidx[3]),
frame.xs(fidx[5]),
frame.xs(fidx[7]),
frame.xs(fidx[5])[:2]]
for op in ops:
for s in series:
_compare_to_dense(frame, s, frame.to_dense(),
s, op)
_compare_to_dense(s, frame, s,
frame.to_dense(), op)
# it works!
result = self.frame + self.frame.ix[:, ['A', 'B']]
def test_op_corners(self):
empty = self.empty + self.empty
self.assertTrue(empty.empty)
foo = self.frame + self.empty
tm.assert_isinstance(foo.index, DatetimeIndex)
assert_frame_equal(foo, self.frame * np.nan)
foo = self.empty + self.frame
assert_frame_equal(foo, self.frame * np.nan)
def test_scalar_ops(self):
pass
def test_getitem(self):
# 1585 select multiple columns
sdf = SparseDataFrame(index=[0, 1, 2], columns=['a', 'b', 'c'])
result = sdf[['a', 'b']]
exp = sdf.reindex(columns=['a', 'b'])
assert_sp_frame_equal(result, exp)
self.assertRaises(Exception, sdf.__getitem__, ['a', 'd'])
def test_icol(self):
# 2227
result = self.frame.icol(0)
self.assertTrue(isinstance(result, SparseSeries))
assert_sp_series_equal(result, self.frame['A'])
# preserve sparse index type. #2251
data = {'A': [0, 1]}
iframe = SparseDataFrame(data, default_kind='integer')
self.assertEqual(type(iframe['A'].sp_index),
type(iframe.icol(0).sp_index))
def test_set_value(self):
# ok as the index gets converted to object
frame = self.frame.copy()
res = frame.set_value('foobar', 'B', 1.5)
self.assertEqual(res.index.dtype, 'object')
res = self.frame
res.index = res.index.astype(object)
res = self.frame.set_value('foobar', 'B', 1.5)
self.assertIsNot(res, self.frame)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res.get_value('foobar', 'B'), 1.5)
res2 = res.set_value('foobar', 'qux', 1.5)
self.assertIsNot(res2, res)
self.assert_numpy_array_equal(res2.columns,
list(self.frame.columns) + ['qux'])
self.assertEqual(res2.get_value('foobar', 'qux'), 1.5)
def test_fancy_index_misc(self):
# axis = 0
sliced = self.frame.ix[-2:, :]
expected = self.frame.reindex(index=self.frame.index[-2:])
assert_sp_frame_equal(sliced, expected)
# axis = 1
sliced = self.frame.ix[:, -2:]
expected = self.frame.reindex(columns=self.frame.columns[-2:])
assert_sp_frame_equal(sliced, expected)
def test_getitem_overload(self):
# slicing
sl = self.frame[:20]
assert_sp_frame_equal(sl, self.frame.reindex(self.frame.index[:20]))
# boolean indexing
d = self.frame.index[5]
indexer = self.frame.index > d
subindex = self.frame.index[indexer]
subframe = self.frame[indexer]
self.assert_numpy_array_equal(subindex, subframe.index)
self.assertRaises(Exception, self.frame.__getitem__, indexer[:-1])
def test_setitem(self):
def _check_frame(frame):
N = len(frame)
# insert SparseSeries
frame['E'] = frame['A']
tm.assert_isinstance(frame['E'], SparseSeries)
assert_sp_series_equal(frame['E'], frame['A'])
# insert SparseSeries differently-indexed
to_insert = frame['A'][::2]
frame['E'] = to_insert
expected = to_insert.to_dense().reindex(
frame.index).fillna(to_insert.fill_value)
result = frame['E'].to_dense()
assert_series_equal(result, expected, check_names=False)
self.assertEqual(result.name, 'E')
# insert Series
frame['F'] = frame['A'].to_dense()
tm.assert_isinstance(frame['F'], SparseSeries)
assert_sp_series_equal(frame['F'], frame['A'])
# insert Series differently-indexed
to_insert = frame['A'].to_dense()[::2]
frame['G'] = to_insert
expected = to_insert.reindex(
frame.index).fillna(frame.default_fill_value)
assert_series_equal(frame['G'].to_dense(), expected)
# insert ndarray
frame['H'] = np.random.randn(N)
tm.assert_isinstance(frame['H'], SparseSeries)
to_sparsify = np.random.randn(N)
to_sparsify[N // 2:] = frame.default_fill_value
frame['I'] = to_sparsify
self.assertEqual(len(frame['I'].sp_values), N // 2)
# insert ndarray wrong size
self.assertRaises(Exception, frame.__setitem__, 'foo',
np.random.randn(N - 1))
# scalar value
frame['J'] = 5
self.assertEqual(len(frame['J'].sp_values), N)
self.assertTrue((frame['J'].sp_values == 5).all())
frame['K'] = frame.default_fill_value
self.assertEqual(len(frame['K'].sp_values), 0)
self._check_all(_check_frame)
def test_setitem_corner(self):
self.frame['a'] = self.frame['B']
assert_sp_series_equal(self.frame['a'], self.frame['B'])
def test_setitem_array(self):
arr = self.frame['B']
self.frame['E'] = arr
assert_sp_series_equal(self.frame['E'], self.frame['B'])
self.frame['F'] = arr[:-1]
index = self.frame.index[:-1]
assert_sp_series_equal(
self.frame['E'].reindex(index), self.frame['F'].reindex(index))
def test_delitem(self):
A = self.frame['A']
C = self.frame['C']
del self.frame['B']
self.assertNotIn('B', self.frame)
assert_sp_series_equal(self.frame['A'], A)
assert_sp_series_equal(self.frame['C'], C)
del self.frame['D']
self.assertNotIn('D', self.frame)
del self.frame['A']
self.assertNotIn('A', self.frame)
def test_set_columns(self):
self.frame.columns = self.frame.columns
self.assertRaises(Exception, setattr, self.frame, 'columns',
self.frame.columns[:-1])
def test_set_index(self):
self.frame.index = self.frame.index
self.assertRaises(Exception, setattr, self.frame, 'index',
self.frame.index[:-1])
def test_append(self):
a = self.frame[:5]
b = self.frame[5:]
appended = a.append(b)
assert_sp_frame_equal(appended, self.frame, exact_indices=False)
a = self.frame.ix[:5, :3]
b = self.frame.ix[5:]
appended = a.append(b)
assert_sp_frame_equal(
appended.ix[:, :3], self.frame.ix[:, :3], exact_indices=False)
def test_apply(self):
applied = self.frame.apply(np.sqrt)
tm.assert_isinstance(applied, SparseDataFrame)
assert_almost_equal(applied.values, np.sqrt(self.frame.values))
applied = self.fill_frame.apply(np.sqrt)
self.assertEqual(applied['A'].fill_value, np.sqrt(2))
# agg / broadcast
broadcasted = self.frame.apply(np.sum, broadcast=True)
tm.assert_isinstance(broadcasted, SparseDataFrame)
assert_frame_equal(broadcasted.to_dense(),
self.frame.to_dense().apply(np.sum, broadcast=True))
self.assertIs(self.empty.apply(np.sqrt), self.empty)
from pandas.core import nanops
applied = self.frame.apply(np.sum)
assert_series_equal(applied,
self.frame.to_dense().apply(nanops.nansum))
def test_apply_nonuq(self):
df_orig = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
df = df_orig.to_sparse()
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1., 4., 7.], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
# df.T breaks
df = df_orig.T.to_sparse()
rs = df.apply(lambda s: s[0], axis=0)
# no non-unique columns supported in sparse yet
# assert_series_equal(rs, xp)
def test_applymap(self):
# just test that it works
result = self.frame.applymap(lambda x: x * 2)
tm.assert_isinstance(result, SparseDataFrame)
def test_astype(self):
self.assertRaises(Exception, self.frame.astype, np.int64)
def test_fillna(self):
df = self.zframe.reindex(lrange(5))
result = df.fillna(0)
expected = df.to_dense().fillna(0).to_sparse(fill_value=0)
assert_sp_frame_equal(result, expected, exact_indices=False)
result = df.copy()
result.fillna(0, inplace=True)
expected = df.to_dense().fillna(0).to_sparse(fill_value=0)
assert_sp_frame_equal(result, expected, exact_indices=False)
result = df.copy()
result = df['A']
result.fillna(0, inplace=True)
assert_series_equal(result, df['A'].fillna(0))
def test_rename(self):
# just check this works
renamed = self.frame.rename(index=str)
renamed = self.frame.rename(columns=lambda x: '%s%d' % (x, len(x)))
def test_corr(self):
res = self.frame.corr()
assert_frame_equal(res, self.frame.to_dense().corr())
def test_describe(self):
self.frame['foo'] = np.nan
self.frame.get_dtype_counts()
str(self.frame)
desc = self.frame.describe()
def test_join(self):
left = self.frame.ix[:, ['A', 'B']]
right = self.frame.ix[:, ['C', 'D']]
joined = left.join(right)
assert_sp_frame_equal(joined, self.frame, exact_indices=False)
right = self.frame.ix[:, ['B', 'D']]
self.assertRaises(Exception, left.join, right)
with tm.assertRaisesRegexp(ValueError, 'Other Series must have a name'):
self.frame.join(Series(np.random.randn(len(self.frame)),
index=self.frame.index))
def test_reindex(self):
def _check_frame(frame):
index = frame.index
sidx = index[::2]
sidx2 = index[:5]
sparse_result = frame.reindex(sidx)
dense_result = frame.to_dense().reindex(sidx)
assert_frame_equal(sparse_result.to_dense(), dense_result)
assert_frame_equal(frame.reindex(list(sidx)).to_dense(),
dense_result)
sparse_result2 = sparse_result.reindex(index)
dense_result2 = dense_result.reindex(
index).fillna(frame.default_fill_value)
assert_frame_equal(sparse_result2.to_dense(), dense_result2)
# propagate CORRECT fill value
assert_almost_equal(sparse_result.default_fill_value,
frame.default_fill_value)
assert_almost_equal(sparse_result['A'].fill_value,
frame['A'].fill_value)
# length zero
length_zero = frame.reindex([])
self.assertEqual(len(length_zero), 0)
self.assertEqual(len(length_zero.columns), len(frame.columns))
self.assertEqual(len(length_zero['A']), 0)
# frame being reindexed has length zero
length_n = length_zero.reindex(index)
self.assertEqual(len(length_n), len(frame))
self.assertEqual(len(length_n.columns), len(frame.columns))
self.assertEqual(len(length_n['A']), len(frame))
# reindex columns
reindexed = frame.reindex(columns=['A', 'B', 'Z'])
self.assertEqual(len(reindexed.columns), 3)
assert_almost_equal(reindexed['Z'].fill_value,
frame.default_fill_value)
self.assertTrue(np.isnan(reindexed['Z'].sp_values).all())
_check_frame(self.frame)
_check_frame(self.iframe)
_check_frame(self.zframe)
_check_frame(self.fill_frame)
# with copy=False
reindexed = self.frame.reindex(self.frame.index, copy=False)
reindexed['F'] = reindexed['A']
self.assertIn('F', self.frame)
reindexed = self.frame.reindex(self.frame.index)
reindexed['G'] = reindexed['A']
self.assertNotIn('G', self.frame)
def test_reindex_fill_value(self):
rng = bdate_range('20110110', periods=20)
result = self.zframe.reindex(rng, fill_value=0)
expected = self.zframe.reindex(rng).fillna(0)
assert_sp_frame_equal(result, expected)
def test_take(self):
result = self.frame.take([1, 0, 2], axis=1)
expected = self.frame.reindex(columns=['B', 'A', 'C'])
assert_sp_frame_equal(result, expected)
def test_density(self):
df = SparseDataFrame({'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]})
self.assertEqual(df.density, 0.75)
def test_to_dense(self):
def _check(frame):
dense_dm = frame.to_dense()
assert_frame_equal(frame, dense_dm)
self._check_all(_check)
def test_stack_sparse_frame(self):
def _check(frame):
dense_frame = frame.to_dense()
wp = Panel.from_dict({'foo': frame})
from_dense_lp = wp.to_frame()
from_sparse_lp = spf.stack_sparse_frame(frame)
self.assert_numpy_array_equal(from_dense_lp.values,
from_sparse_lp.values)
_check(self.frame)
_check(self.iframe)
# for now
self.assertRaises(Exception, _check, self.zframe)
self.assertRaises(Exception, _check, self.fill_frame)
def test_transpose(self):
def _check(frame):
transposed = frame.T
untransposed = transposed.T
assert_sp_frame_equal(frame, untransposed)
self._check_all(_check)
def test_shift(self):
def _check(frame):
shifted = frame.shift(0)
assert_sp_frame_equal(shifted, frame)
f = lambda s: s.shift(1)
_dense_frame_compare(frame, f)
f = lambda s: s.shift(-2)
_dense_frame_compare(frame, f)
f = lambda s: s.shift(2, freq='B')
_dense_frame_compare(frame, f)
f = lambda s: s.shift(2, freq=datetools.bday)
_dense_frame_compare(frame, f)
self._check_all(_check)
def test_count(self):
result = self.frame.count()
dense_result = self.frame.to_dense().count()
assert_series_equal(result, dense_result)
result = self.frame.count(1)
dense_result = self.frame.to_dense().count(1)
# on win32, don't check dtype
assert_series_equal(result, dense_result, check_dtype=False)
def test_cumsum(self):
result = self.frame.cumsum()
expected = self.frame.to_dense().cumsum()
tm.assert_isinstance(result, SparseDataFrame)
assert_frame_equal(result.to_dense(), expected)
def _check_all(self, check_func):
check_func(self.frame)
check_func(self.iframe)
check_func(self.zframe)
check_func(self.fill_frame)
def test_combine_first(self):
df = self.frame
result = df[::2].combine_first(df)
result2 = df[::2].combine_first(df.to_dense())
expected = df[::2].to_dense().combine_first(df.to_dense())
expected = expected.to_sparse(fill_value=df.default_fill_value)
assert_sp_frame_equal(result, result2)
assert_sp_frame_equal(result, expected)
def test_combine_add(self):
df = self.frame.to_dense()
df2 = df.copy()
df2['C'][:3] = np.nan
df['A'][:3] = 5.7
result = df.to_sparse().add(df2.to_sparse(), fill_value=0)
expected = df.add(df2, fill_value=0).to_sparse()
assert_sp_frame_equal(result, expected)
def test_isin(self):
sparse_df = DataFrame({'flag': [1., 0., 1.]}).to_sparse(fill_value=0.)
xp = sparse_df[sparse_df.flag == 1.]
rs = sparse_df[sparse_df.flag.isin([1.])]
assert_frame_equal(xp, rs)
def test_sparse_pow_issue(self):
# 2220
df = SparseDataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
# note : no error without nan
df = SparseDataFrame({'A': [nan, 0, 1]})
# note that 2 ** df works fine, also df ** 1
result = 1 ** df
r1 = result.take([0], 1)['A']
r2 = result['A']
self.assertEqual(len(r2.sp_values), len(r1.sp_values))
def test_as_blocks(self):
df = SparseDataFrame({'A': [1.1, 3.3], 'B': [nan, -3.9]},
dtype='float64')
df_blocks = df.blocks
self.assertEqual(list(df_blocks.keys()), ['float64'])
assert_frame_equal(df_blocks['float64'], df)
def test_nan_columnname(self):
# GH 8822
nan_colname = DataFrame(Series(1.0,index=[0]),columns=[nan])
nan_colname_sparse = nan_colname.to_sparse()
self.assertTrue(np.isnan(nan_colname_sparse.columns[0]))
def _dense_series_compare(s, f):
result = f(s)
assert(isinstance(result, SparseSeries))
dense_result = f(s.to_dense())
assert_series_equal(result.to_dense(), dense_result)
def _dense_frame_compare(frame, f):
result = f(frame)
assert(isinstance(frame, SparseDataFrame))
dense_result = f(frame.to_dense()).fillna(frame.default_fill_value)
assert_frame_equal(result.to_dense(), dense_result)
def panel_data1():
index = bdate_range('1/1/2011', periods=8)
return DataFrame({
'A': [nan, nan, nan, 0, 1, 2, 3, 4],
'B': [0, 1, 2, 3, 4, nan, nan, nan],
'C': [0, 1, 2, nan, nan, nan, 3, 4],
'D': [nan, 0, 1, nan, 2, 3, 4, nan]
}, index=index)
def panel_data2():
index = bdate_range('1/1/2011', periods=9)
return DataFrame({
'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5],
'B': [0, 1, 2, 3, 4, 5, nan, nan, nan],
'C': [0, 1, 2, nan, nan, nan, 3, 4, 5],
'D': [nan, 0, 1, nan, 2, 3, 4, 5, nan]
}, index=index)
def panel_data3():
index = bdate_range('1/1/2011', periods=10).shift(-2)
return DataFrame({
'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, 3, 4, 5, 6, nan, nan, nan],
'C': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'D': [nan, 0, 1, nan, 2, 3, 4, 5, 6, nan]
}, index=index)
class TestSparsePanel(tm.TestCase,
test_panel.SafeForLongAndSparse,
test_panel.SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_sp_panel_equal(x, y)
def setUp(self):
self.data_dict = {
'ItemA': panel_data1(),
'ItemB': panel_data2(),
'ItemC': panel_data3(),
'ItemD': panel_data1(),
}
self.panel = SparsePanel(self.data_dict)
@staticmethod
def _test_op(panel, op):
# arithmetic tests
result = op(panel, 1)
assert_sp_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_constructor(self):
self.assertRaises(ValueError, SparsePanel, self.data_dict,
items=['Item0', 'ItemA', 'ItemB'])
with tm.assertRaisesRegexp(TypeError,
"input must be a dict, a 'list' was passed"):
SparsePanel(['a', 'b', 'c'])
# GH 9272
def test_constructor_empty(self):
sp = SparsePanel()
self.assertEqual(len(sp.items), 0)
self.assertEqual(len(sp.major_axis), 0)
self.assertEqual(len(sp.minor_axis), 0)
def test_from_dict(self):
fd = SparsePanel.from_dict(self.data_dict)
assert_sp_panel_equal(fd, self.panel)
def test_pickle(self):
def _test_roundtrip(panel):
result = self.round_trip_pickle(panel)
tm.assert_isinstance(result.items, Index)
tm.assert_isinstance(result.major_axis, Index)
tm.assert_isinstance(result.minor_axis, Index)
assert_sp_panel_equal(panel, result)
_test_roundtrip(self.panel)
def test_dense_to_sparse(self):
wp = Panel.from_dict(self.data_dict)
dwp = wp.to_sparse()
tm.assert_isinstance(dwp['ItemA']['A'], SparseSeries)
def test_to_dense(self):
dwp = self.panel.to_dense()
dwp2 = Panel.from_dict(self.data_dict)
assert_panel_equal(dwp, dwp2)
def test_to_frame(self):
def _compare_with_dense(panel):
slp = panel.to_frame()
dlp = panel.to_dense().to_frame()
self.assert_numpy_array_equal(slp.values, dlp.values)
self.assertTrue(slp.index.equals(dlp.index))
_compare_with_dense(self.panel)
_compare_with_dense(self.panel.reindex(items=['ItemA']))
zero_panel = SparsePanel(self.data_dict, default_fill_value=0)
self.assertRaises(Exception, zero_panel.to_frame)
self.assertRaises(Exception, self.panel.to_frame,
filter_observations=False)
def test_long_to_wide_sparse(self):
pass
def test_values(self):
pass
def test_setitem(self):
self.panel['ItemE'] = self.panel['ItemC']
self.panel['ItemF'] = self.panel['ItemC'].to_dense()
assert_sp_frame_equal(self.panel['ItemE'], self.panel['ItemC'])
assert_sp_frame_equal(self.panel['ItemF'], self.panel['ItemC'])
assert_almost_equal(self.panel.items, ['ItemA', 'ItemB', 'ItemC',
'ItemD', 'ItemE', 'ItemF'])
self.assertRaises(Exception, self.panel.__setitem__, 'item6', 1)
def test_set_value(self):
def _check_loc(item, major, minor, val=1.5):
res = self.panel.set_value(item, major, minor, val)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value(item, major, minor), val)
_check_loc('ItemA', self.panel.major_axis[4], self.panel.minor_axis[3])
_check_loc('ItemF', self.panel.major_axis[4], self.panel.minor_axis[3])
_check_loc('ItemF', 'foo', self.panel.minor_axis[3])
_check_loc('ItemE', 'foo', 'bar')
def test_delitem_pop(self):
del self.panel['ItemB']
assert_almost_equal(self.panel.items, ['ItemA', 'ItemC', 'ItemD'])
crackle = self.panel['ItemC']
pop = self.panel.pop('ItemC')
self.assertIs(pop, crackle)
assert_almost_equal(self.panel.items, ['ItemA', 'ItemD'])
self.assertRaises(KeyError, self.panel.__delitem__, 'ItemC')
def test_copy(self):
cop = self.panel.copy()
assert_sp_panel_equal(cop, self.panel)
def test_reindex(self):
def _compare_with_dense(swp, items, major, minor):
swp_re = swp.reindex(items=items, major=major,
minor=minor)
dwp_re = swp.to_dense().reindex(items=items, major=major,
minor=minor)
assert_panel_equal(swp_re.to_dense(), dwp_re)
_compare_with_dense(self.panel, self.panel.items[:2],
self.panel.major_axis[::2],
self.panel.minor_axis[::2])
_compare_with_dense(self.panel, None,
self.panel.major_axis[::2],
self.panel.minor_axis[::2])
self.assertRaises(ValueError, self.panel.reindex)
# TODO: do something about this later...
self.assertRaises(Exception, self.panel.reindex,
items=['item0', 'ItemA', 'ItemB'])
# test copying
cp = self.panel.reindex(self.panel.major_axis, copy=True)
cp['ItemA']['E'] = cp['ItemA']['A']
self.assertNotIn('E', self.panel['ItemA'])
def test_operators(self):
def _check_ops(panel):
def _dense_comp(op):
dense = panel.to_dense()
sparse_result = op(panel)
dense_result = op(dense)
assert_panel_equal(sparse_result.to_dense(), dense_result)
def _mixed_comp(op):
result = op(panel, panel.to_dense())
expected = op(panel.to_dense(), panel.to_dense())
assert_panel_equal(result, expected)
op1 = lambda x: x + 2
_dense_comp(op1)
op2 = lambda x: x.add(x.reindex(major=x.major_axis[::2]))
_dense_comp(op2)
op3 = lambda x: x.subtract(x.mean(0), axis=0)
_dense_comp(op3)
op4 = lambda x: x.subtract(x.mean(1), axis=1)
_dense_comp(op4)
op5 = lambda x: x.subtract(x.mean(2), axis=2)
_dense_comp(op5)
_mixed_comp(Panel.multiply)
_mixed_comp(Panel.subtract)
# TODO: this case not yet supported!
# op6 = lambda x: x.add(x.to_frame())
# _dense_comp(op6)
_check_ops(self.panel)
def test_major_xs(self):
def _dense_comp(sparse):
dense = sparse.to_dense()
for idx in sparse.major_axis:
dslice = dense.major_xs(idx)
sslice = sparse.major_xs(idx)
assert_frame_equal(dslice, sslice)
_dense_comp(self.panel)
def test_minor_xs(self):
def _dense_comp(sparse):
dense = sparse.to_dense()
for idx in sparse.minor_axis:
dslice = dense.minor_xs(idx)
sslice = sparse.minor_xs(idx).to_dense()
assert_frame_equal(dslice, sslice)
_dense_comp(self.panel)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
# nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure',
# '--with-profile'],
# exit=False)
|
mit
|
arabenjamin/scikit-learn
|
examples/cluster/plot_mini_batch_kmeans.py
|
265
|
4081
|
"""
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
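# Illustrative aside (not part of the original example): pairwise_distances_argmin
# returns, for each row of its first argument, the index of the nearest row in its
# second argument, which is why order[k] maps KMeans cluster k to its closest
# MiniBatchKMeans cluster. A tiny sketch with made-up points (the _demo_* names are
# purely illustrative):
_demo_a = np.array([[0.0, 0.0], [10.0, 10.0]])
_demo_b = np.array([[9.5, 9.5], [0.5, -0.5]])
assert list(pairwise_distances_argmin(_demo_a, _demo_b)) == [1, 0]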
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
    different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
|
bsd-3-clause
|
Renata1995/Rules-and-Stats-in-AGL
|
data_analysis/main_effect.py
|
1
|
4407
|
from info_extraction import InfoExtractor
import os
from scipy import stats
import matplotlib.pyplot as plt
# open all files
ie = InfoExtractor()
src = "CFG"
# g items
re_letter_g = []
re_color_g = []
re_all_g = []
for fname in os.listdir(src):
filename = src + "/" + fname
test_data_letter, test_data_color, test_data = ie.test_data(filename)
letter_ap, color_ap, overall_ap = ie.accuracy(test_data_letter, test_data_color)
re_letter_g.append(letter_ap["g_percent"])
re_color_g.append(color_ap["g_percent"])
re_all_g.append(overall_ap["g_percent"])
# ug items
re_letter_ug = []
re_color_ug = []
re_all_ug = []
for fname in os.listdir(src):
filename = src + "/" + fname
test_data_letter, test_data_color, test_data = ie.test_data(filename)
letter_ap, color_ap, overall_ap = ie.accuracy(test_data_letter, test_data_color)
re_letter_ug.append(letter_ap["ug_percent"])
re_color_ug.append(color_ap["ug_percent"])
re_all_ug.append(overall_ap["ug_percent"])
# construct a comparison list
re_comp = []
for item in re_letter_g:
re_comp.append(0.5)
src = "CFG_R"
rec_letter_g = []
rec_color_g = []
rec_all_g = []
for fname in os.listdir(src):
filename = src + "/" + fname
test_data_letter, test_data_color, test_data = ie.test_data(filename)
letter_ap, color_ap, overall_ap = ie.accuracy(test_data_letter, test_data_color)
rec_letter_g.append(letter_ap["g_percent"])
rec_color_g.append(color_ap["g_percent"])
rec_all_g.append(overall_ap["g_percent"])
# ug items
rec_letter_ug = []
rec_color_ug = []
rec_all_ug = []
for fname in os.listdir(src):
filename = src + "/" + fname
test_data_letter, test_data_color, test_data = ie.test_data(filename)
letter_ap, color_ap, overall_ap = ie.accuracy(test_data_letter, test_data_color)
rec_letter_ug.append(letter_ap["ug_percent"])
rec_color_ug.append(color_ap["ug_percent"])
rec_all_ug.append(overall_ap["ug_percent"])
# construct a comparison list
rec_comp = []
for item in rec_letter_g:
rec_comp.append(0.5)
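# Aside (not part of the original analysis): scipy also provides a direct
# one-sample test, stats.ttest_1samp(re_letter_g, 0.5), for asking whether a mean
# differs from 50%; the ttest_ind calls below compare against a constant 0.5 list
# instead.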
# Whether RE - G items are different than 50%
lresult = stats.ttest_ind(re_letter_g, re_comp)
cresult = stats.ttest_ind(re_color_g, re_comp)
allresult = stats.ttest_ind(re_all_g, re_comp)
print "EXP Letter-G: " + str(lresult) + " Color-G: " + str(cresult) + " All-G: " + str(allresult)
lresult = stats.ttest_ind(re_letter_ug, re_comp)
cresult = stats.ttest_ind(re_color_ug, re_comp)
allresult = stats.ttest_ind(re_all_ug, re_comp)
print "EXP Letter-UG: " + str(lresult) + " Color-UG: " + str(cresult) + " All-UG: " + str(allresult)
lresult = stats.ttest_ind(rec_letter_g, rec_comp)
cresult = stats.ttest_ind(rec_color_g, rec_comp)
allresult = stats.ttest_ind(rec_all_g, rec_comp)
print "CONTROL Letter-G: " + str(lresult) + " Color-G: " + str(cresult) + " All-G: " + str(allresult)
lresult = stats.ttest_ind(rec_letter_ug, rec_comp)
cresult = stats.ttest_ind(rec_color_ug, rec_comp)
allresult = stats.ttest_ind(rec_all_ug, rec_comp)
print "CONTROL Letter-UG: " + str(lresult) + " Color-UG: " + str(cresult) + " All-UG: " + str(allresult)
all_letter = re_letter_g + re_letter_ug
control_all_letter = rec_letter_g + rec_letter_ug
result = stats.ttest_ind(re_letter_g, rec_letter_g)
print "Letter"
print "Comp G: " + str(result)
result = stats.ttest_ind(re_letter_ug, rec_letter_ug)
print "Comp UG: " + str(result)
result = stats.ttest_ind(all_letter, control_all_letter)
print "Comp ALL: " + str(result)
result = stats.ttest_ind(all_letter, re_comp*2)
print "Comp EXP and 50%: " + str(result)
result = stats.ttest_ind(control_all_letter, re_comp)
print "Comp CONTROL and 50%: " + str(result)
all_color = re_color_g + re_color_ug
control_all_color = rec_color_g + rec_color_ug
result = stats.ttest_ind(re_color_g, rec_color_g)
print "Color"
print "Comp G: " + str(result)
result = stats.ttest_ind(re_color_ug, rec_color_ug)
print "Comp UG: " + str(result)
result = stats.ttest_ind(all_color, control_all_color)
print "Comp ALL: " + str(result)
result = stats.ttest_ind(all_color, re_comp*2)
print "Comp EXP and 50%: " + str(result)
result = stats.ttest_ind(control_all_color, re_comp)
print "Comp CONTROL and 50%: " + str(result)
# # plot
# plot_list = re_all_g + re_all_ug
# x_axis = range(1, len(plot_list)+1)
# y_axis = [item*100 for item in plot_list]
#
# line_pos, = plt.plot(x_axis, y_axis, color=(0, 0, 1), marker="o")
# plt.show()
|
apache-2.0
|
aetilley/scikit-learn
|
examples/linear_model/plot_sgd_separating_hyperplane.py
|
260
|
1219
|
"""
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
    p = clf.decision_function([[x1, x2]])
Z[i, j] = p[0]
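# Aside (not part of the original example): the grid evaluation above can also be
# written as a single vectorised call; this recomputes Z and should give the same
# values as the explicit loop.
Z = np.reshape(clf.decision_function(np.c_[X1.ravel(), X2.ravel()]), X1.shape)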
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
arjoly/scikit-learn
|
examples/preprocessing/plot_robust_scaling.py
|
221
|
2702
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
|
bsd-3-clause
|
mojones/Axelrod
|
axelrod/tournament_manager.py
|
1
|
6772
|
from __future__ import absolute_import, unicode_literals, print_function
import os
import cloudpickle as pickle
from .tournament import *
from .plot import *
from .ecosystem import *
from .utils import *
class TournamentManager(object):
def __init__(self, output_directory, with_ecological,
pass_cache=True, load_cache=True, save_cache=False,
cache_file='./cache.txt'):
self._tournaments = []
self._ecological_variants = []
self._logger = logging.getLogger(__name__)
self._output_directory = output_directory
self._with_ecological = with_ecological
self._pass_cache = pass_cache
self._save_cache = save_cache
self._cache_file = cache_file
self._deterministic_cache = {}
self._cache_valid_for_turns = None
self._load_cache = False
        if load_cache and not save_cache:
            self._load_cache = self._load_cache_from_file(cache_file)
@staticmethod
def one_player_per_strategy(strategies):
return [strategy() for strategy in strategies]
def add_tournament(self, name, players, game=None, turns=200,
repetitions=10, processes=None, noise=0,
with_morality=True):
tournament = Tournament(
name=name,
players=players,
turns=turns,
repetitions=repetitions,
processes=processes,
noise=noise,
with_morality=with_morality)
self._tournaments.append(tournament)
def run_tournaments(self):
t0 = time.time()
for tournament in self._tournaments:
self._run_single_tournament(tournament)
if self._save_cache and not tournament.noise:
self._save_cache_to_file(self._deterministic_cache, self._cache_file)
self._logger.info(timed_message('Finished all tournaments', t0))
def _run_single_tournament(self, tournament):
self._logger.info(
'Starting %s tournament with %d round robins of %d turns per pair.'
% (tournament.name, tournament.repetitions, tournament.turns))
t0 = time.time()
if not tournament.noise and self._pass_cache and self._valid_cache(tournament.turns):
self._logger.debug('Passing cache with %d entries to %s tournament' %
(len(self._deterministic_cache), tournament.name))
tournament.deterministic_cache = self._deterministic_cache
if self._load_cache:
tournament.prebuilt_cache = True
else:
self._logger.debug('Cache is not valid for %s tournament' %
tournament.name)
tournament.play()
self._logger.debug(timed_message('Finished %s tournament' % tournament.name, t0))
if self._with_ecological:
ecosystem = Ecosystem(tournament.result_set)
self.run_ecological_variant(tournament, ecosystem)
else:
ecosystem = None
self._generate_output_files(tournament, ecosystem)
self._cache_valid_for_turns = tournament.turns
self._logger.debug('Cache now has %d entries' %
len(self._deterministic_cache))
self._logger.info(
timed_message('Finished all %s tasks' % tournament.name, t0))
def _valid_cache(self, turns):
return ((len(self._deterministic_cache) == 0) or
(len(self._deterministic_cache) > 0) and
turns == self._cache_valid_for_turns)
def run_ecological_variant(self, tournament, ecosystem):
self._logger.debug(
'Starting ecological variant of %s' % tournament.name)
t0 = time.time()
ecoturns = {
'basic_strategies': 1000,
'cheating_strategies': 10,
'strategies': 1000,
'all_strategies': 10,
}
ecosystem.reproduce(ecoturns.get(tournament.name))
self._logger.debug(
timed_message('Finished ecological variant of %s' % tournament.name, t0))
def _generate_output_files(self, tournament, ecosystem=None):
self._save_csv(tournament)
self._save_plots(tournament, ecosystem)
def _save_csv(self, tournament):
csv = tournament.result_set.csv()
file_name = self._output_file_path(
tournament.name, 'csv')
with open(file_name, 'w') as f:
f.write(csv)
def _save_plots(self, tournament, ecosystem=None, image_format="svg"):
results = tournament.result_set
plot = Plot(results)
if not plot.matplotlib_installed:
self._logger.error('The matplotlib library is not installed. '
'No plots will be produced')
return
for plot_type in ('boxplot', 'payoff', 'winplot', 'sdvplot', 'pdplot'):
figure = getattr(plot, plot_type)()
file_name = self._output_file_path(
tournament.name + '_' + plot_type, image_format)
self._save_plot(figure, file_name)
if ecosystem is not None:
figure = plot.stackplot(ecosystem.population_sizes)
file_name = self._output_file_path(
tournament.name + '_reproduce', image_format)
self._save_plot(figure, file_name)
def _output_file_path(self, file_name, file_extension):
return os.path.join(
self._output_directory,
file_name + '.' + file_extension)
@staticmethod
def _save_plot(figure, file_name, dpi=400):
figure.savefig(file_name, bbox_inches='tight', dpi=dpi)
figure.clf()
plt.close(figure)
def _save_cache_to_file(self, cache, file_name):
self._logger.debug(
'Saving cache with %d entries to %s' % (len(cache), file_name))
deterministic_cache = DeterministicCache(
cache, self._cache_valid_for_turns)
with open(file_name, 'wb') as io:
pickle.dump(deterministic_cache, io)
return True
def _load_cache_from_file(self, file_name):
try:
with open(file_name, 'rb') as io:
deterministic_cache = pickle.load(io)
self._deterministic_cache = deterministic_cache.cache
self._cache_valid_for_turns = deterministic_cache.turns
self._logger.debug(
'Loaded cache with %d entries' % len(self._deterministic_cache))
return True
except IOError:
self._logger.debug('Cache file not found. Starting with empty cache')
return False
class DeterministicCache(object):
def __init__(self, cache, turns):
self.cache = cache
self.turns = turns
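# Usage sketch (hedged illustration only; the strategy list, tournament name and
# output directory below are assumptions, not taken from this module):
# import axelrod
# manager = TournamentManager(output_directory='./results', with_ecological=True)
# players = TournamentManager.one_player_per_strategy(axelrod.basic_strategies)
# manager.add_tournament('basic_strategies', players)
# manager.run_tournaments()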
|
mit
|
kashif/scikit-learn
|
sklearn/metrics/setup.py
|
299
|
1024
|
import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.c"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
|
bsd-3-clause
|
Prasad9/incubator-mxnet
|
example/gan/dcgan.py
|
24
|
10758
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import numpy as np
from sklearn.datasets import fetch_mldata
from matplotlib import pyplot as plt
import logging
import cv2
from datetime import datetime
def make_dcgan_sym(ngf, ndf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
BatchNorm = mx.sym.BatchNorm
rand = mx.sym.Variable('rand')
g1 = mx.sym.Deconvolution(rand, name='g1', kernel=(4,4), num_filter=ngf*8, no_bias=no_bias)
gbn1 = BatchNorm(g1, name='gbn1', fix_gamma=fix_gamma, eps=eps)
gact1 = mx.sym.Activation(gbn1, name='gact1', act_type='relu')
g2 = mx.sym.Deconvolution(gact1, name='g2', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ngf*4, no_bias=no_bias)
gbn2 = BatchNorm(g2, name='gbn2', fix_gamma=fix_gamma, eps=eps)
gact2 = mx.sym.Activation(gbn2, name='gact2', act_type='relu')
g3 = mx.sym.Deconvolution(gact2, name='g3', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ngf*2, no_bias=no_bias)
gbn3 = BatchNorm(g3, name='gbn3', fix_gamma=fix_gamma, eps=eps)
gact3 = mx.sym.Activation(gbn3, name='gact3', act_type='relu')
g4 = mx.sym.Deconvolution(gact3, name='g4', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ngf, no_bias=no_bias)
gbn4 = BatchNorm(g4, name='gbn4', fix_gamma=fix_gamma, eps=eps)
gact4 = mx.sym.Activation(gbn4, name='gact4', act_type='relu')
g5 = mx.sym.Deconvolution(gact4, name='g5', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=nc, no_bias=no_bias)
gout = mx.sym.Activation(g5, name='gact5', act_type='tanh')
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
d1 = mx.sym.Convolution(data, name='d1', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ndf, no_bias=no_bias)
dact1 = mx.sym.LeakyReLU(d1, name='dact1', act_type='leaky', slope=0.2)
d2 = mx.sym.Convolution(dact1, name='d2', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ndf*2, no_bias=no_bias)
dbn2 = BatchNorm(d2, name='dbn2', fix_gamma=fix_gamma, eps=eps)
dact2 = mx.sym.LeakyReLU(dbn2, name='dact2', act_type='leaky', slope=0.2)
d3 = mx.sym.Convolution(dact2, name='d3', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ndf*4, no_bias=no_bias)
dbn3 = BatchNorm(d3, name='dbn3', fix_gamma=fix_gamma, eps=eps)
dact3 = mx.sym.LeakyReLU(dbn3, name='dact3', act_type='leaky', slope=0.2)
d4 = mx.sym.Convolution(dact3, name='d4', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ndf*8, no_bias=no_bias)
dbn4 = BatchNorm(d4, name='dbn4', fix_gamma=fix_gamma, eps=eps)
dact4 = mx.sym.LeakyReLU(dbn4, name='dact4', act_type='leaky', slope=0.2)
d5 = mx.sym.Convolution(dact4, name='d5', kernel=(4,4), num_filter=1, no_bias=no_bias)
d5 = mx.sym.Flatten(d5)
dloss = mx.sym.LogisticRegressionOutput(data=d5, label=label, name='dloss')
return gout, dloss
def get_mnist():
mnist = fetch_mldata('MNIST original')
np.random.seed(1234) # set seed for deterministic ordering
p = np.random.permutation(mnist.data.shape[0])
X = mnist.data[p]
X = X.reshape((70000, 28, 28))
X = np.asarray([cv2.resize(x, (64,64)) for x in X])
X = X.astype(np.float32)/(255.0/2) - 1.0
X = X.reshape((70000, 1, 64, 64))
X = np.tile(X, (1, 3, 1, 1))
X_train = X[:60000]
X_test = X[60000:]
return X_train, X_test
class RandIter(mx.io.DataIter):
def __init__(self, batch_size, ndim):
self.batch_size = batch_size
self.ndim = ndim
self.provide_data = [('rand', (batch_size, ndim, 1, 1))]
self.provide_label = []
def iter_next(self):
return True
def getdata(self):
return [mx.random.normal(0, 1.0, shape=(self.batch_size, self.ndim, 1, 1))]
class ImagenetIter(mx.io.DataIter):
def __init__(self, path, batch_size, data_shape):
self.internal = mx.io.ImageRecordIter(
path_imgrec = path,
data_shape = data_shape,
batch_size = batch_size,
rand_crop = True,
rand_mirror = True,
max_crop_size = 256,
min_crop_size = 192)
self.provide_data = [('data', (batch_size,) + data_shape)]
self.provide_label = []
def reset(self):
self.internal.reset()
def iter_next(self):
return self.internal.iter_next()
def getdata(self):
data = self.internal.getdata()
data = data * (2.0/255.0)
data -= 1
return [data]
def fill_buf(buf, i, img, shape):
    n = buf.shape[0]//shape[1]
    m = buf.shape[1]//shape[0]
    sx = (i%m)*shape[0]
    sy = (i//m)*shape[1]
buf[sy:sy+shape[1], sx:sx+shape[0], :] = img
def visual(title, X):
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
buff = cv2.cvtColor(buff, cv2.COLOR_BGR2RGB)
plt.imshow(buff)
plt.title(title)
plt.show()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
# =============setting============
dataset = 'mnist'
imgnet_path = './train.rec'
ndf = 64
ngf = 64
nc = 3
batch_size = 64
Z = 100
lr = 0.0002
beta1 = 0.5
ctx = mx.gpu(0)
check_point = False
symG, symD = make_dcgan_sym(ngf, ndf, nc)
#mx.viz.plot_network(symG, shape={'rand': (batch_size, 100, 1, 1)}).view()
#mx.viz.plot_network(symD, shape={'data': (batch_size, nc, 64, 64)}).view()
# ==============data==============
if dataset == 'mnist':
X_train, X_test = get_mnist()
train_iter = mx.io.NDArrayIter(X_train, batch_size=batch_size)
elif dataset == 'imagenet':
train_iter = ImagenetIter(imgnet_path, batch_size, (3, 64, 64))
rand_iter = RandIter(batch_size, Z)
label = mx.nd.zeros((batch_size,), ctx=ctx)
# =============module G=============
modG = mx.mod.Module(symbol=symG, data_names=('rand',), label_names=None, context=ctx)
modG.bind(data_shapes=rand_iter.provide_data)
modG.init_params(initializer=mx.init.Normal(0.02))
modG.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 0.,
'beta1': beta1,
})
mods = [modG]
# =============module D=============
modD = mx.mod.Module(symbol=symD, data_names=('data',), label_names=('label',), context=ctx)
modD.bind(data_shapes=train_iter.provide_data,
label_shapes=[('label', (batch_size,))],
inputs_need_grad=True)
modD.init_params(initializer=mx.init.Normal(0.02))
modD.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 0.,
'beta1': beta1,
})
mods.append(modD)
# ============printing==============
def norm_stat(d):
return mx.nd.norm(d)/np.sqrt(d.size)
mon = mx.mon.Monitor(10, norm_stat, pattern=".*output|d1_backward_data", sort=True)
mon = None
if mon is not None:
for mod in mods:
            mod.install_monitor(mon)
def facc(label, pred):
pred = pred.ravel()
label = label.ravel()
return ((pred > 0.5) == label).mean()
def fentropy(label, pred):
pred = pred.ravel()
label = label.ravel()
return -(label*np.log(pred+1e-12) + (1.-label)*np.log(1.-pred+1e-12)).mean()
mG = mx.metric.CustomMetric(fentropy)
mD = mx.metric.CustomMetric(fentropy)
mACC = mx.metric.CustomMetric(facc)
print('Training...')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
# =============train===============
for epoch in range(100):
train_iter.reset()
for t, batch in enumerate(train_iter):
rbatch = rand_iter.next()
if mon is not None:
mon.tic()
modG.forward(rbatch, is_train=True)
outG = modG.get_outputs()
# update discriminator on fake
label[:] = 0
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
#modD.update()
gradD = [[grad.copyto(grad.context) for grad in grads] for grads in modD._exec_group.grad_arrays]
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
# update discriminator on real
label[:] = 1
batch.label = [label]
modD.forward(batch, is_train=True)
modD.backward()
for gradsr, gradsf in zip(modD._exec_group.grad_arrays, gradD):
for gradr, gradf in zip(gradsr, gradsf):
gradr += gradf
modD.update()
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
# update generator
label[:] = 1
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
diffD = modD.get_input_grads()
modG.backward(diffD)
modG.update()
mG.update([label], modD.get_outputs())
if mon is not None:
mon.toc_print()
t += 1
if t % 10 == 0:
print('epoch:', epoch, 'iter:', t, 'metric:', mACC.get(), mG.get(), mD.get())
mACC.reset()
mG.reset()
mD.reset()
visual('gout', outG[0].asnumpy())
diff = diffD[0].asnumpy()
diff = (diff - diff.mean())/diff.std()
visual('diff', diff)
visual('data', batch.data[0].asnumpy())
if check_point:
print('Saving...')
modG.save_params('%s_G_%s-%04d.params'%(dataset, stamp, epoch))
modD.save_params('%s_D_%s-%04d.params'%(dataset, stamp, epoch))
|
apache-2.0
|
buck06191/BayesCMD
|
bayescmd/abc/summary_stats.py
|
1
|
6755
|
# -*- coding: utf-8 -*-
"""Use to generate summary statistics in simulated and real time series.
Attributes
----------
SUMMARY : dict
Dictionary containing the distance aliases, mapping to the functions.
"""
import numpy as np
import pandas as pd
import scipy.stats as stats
import seaborn as sns
import sys
import os
import matplotlib.pyplot as plt
# sys.path.append(os.path.abspath(os.path.dirname(__file__)))
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class ZeroArrayError(Error):
"""Exception raised for errors in the zero array."""
pass
class SummaryStats:
"""
Generate summary statistics from time series data.
This class will find a range of summary statistics for time series data.
Parameters
----------
actual_data : dict
Dictionary of data, as generated by
:meth:`bayescmd.abc.data_import.import_actual_data` or
:meth:`bayescmd.bcmdModel.ModelBCMD.output_parse`.
targets : list of :obj:`str`
List of model targets, which should all be strings.
zero_flag : dict
Dictionary of form target(:obj:`str`): bool, where bool indicates
whether to zero that target.
Note: zero_flag keys should match targets list.
Returns
-------
distances : dict
Dictionary of form:
{'TOTAL': summed distance of all signals,
'target1: distance of 1st target',
...
'targetN': distance of Nth target
}
"""
@staticmethod
def zero_array(array, zero_flag):
"""Zero an array of data with its initial values.
Parameters
----------
array : list
List of data
        zero_flag : bool
Boolean indicating if data needs zeroing
Returns
-------
zerod : list
Zero'd list
"""
if zero_flag:
init = float(array[0])
zerod = [x - init for x in array]
else:
zerod = array
return zerod
def __init__(self, data, targets, zero_flag, observed_data=None):
self.d0 = {k: self.zero_array(data[k], zero_flag[k]) for k in targets}
self.summary_stats = {k: {} for k in self.d0.keys()}
self.autocorrelation = {k: {} for k in self.d0.keys()}
if observed_data is not None:
self.observed_data = observed_data
self.residuals = {k: np.array(self.d0[k]) - np.array(observed_data[k])
for k in self.d0.keys()}
self.residual_autocorrelation = {k: {} for k in self.d0.keys()}
else:
self.residuals = None
self.observed_data = None
def get_mean(self):
for k, data in self.d0.items():
self.summary_stats[k]['mean'] = np.mean(data)
def get_skew(self):
for k, data in self.d0.items():
self.summary_stats[k]['skewness'] = stats.skew(data)
def get_median(self):
for k, data in self.d0.items():
self.summary_stats[k]['median'] = np.median(data)
def get_variance(self):
for k, data in self.d0.items():
self.summary_stats[k]['variance'] = np.var(data)
self.summary_stats[k]['std'] = np.std(data)
def get_autocorr(self):
def autocorr(y):
yunbiased = y-np.mean(y)
ynorm = np.sum(yunbiased**2)
acor = np.correlate(yunbiased, yunbiased, "same")/ynorm
# use only second half
acor = acor[len(acor)//2:]
return acor
for k, data in self.d0.items():
ac = autocorr(data)
self.autocorrelation[k] = ac
self.summary_stats[k]['max_ac'] = np.argmax(ac[len(ac)//2:])
if self.residuals:
self.residual_autocorrelation[k] = autocorr(self.residuals[k])
def get_stats(self):
self.get_mean()
self.get_variance()
self.get_autocorr()
self.get_median()
self.get_skew()
if __name__ == '__main__':
def summary_test(n=25):
results = {}
observed_data = np.random.randn(3, 100)
observed_data = np.vstack([observed_data, np.sin(
np.linspace(0, 360, 100) * np.pi / 180.)+np.random.randn(1, 100)])
observed_data[1, :] = observed_data[1, :]-observed_data[1, 0]
observed_data[3, :] = observed_data[3, :]-observed_data[3, 0]
targets = ['a', 'b', 'c', 'd']
flags = [False, True, False, True]
trueData = {k: observed_data[i] for i, k in enumerate(targets)}
for i in range(n):
results[i] = {}
random_data = np.random.randn(3, 100)
random_data = np.vstack([random_data, np.sin(
np.linspace(0, 360, 100) * np.pi / 180.)])
zero_flag = {k: flag for k, flag in zip(targets, flags)}
tsData = {k: random_data[i] for i, k in enumerate(targets)}
summary_creator = SummaryStats(
tsData, targets, zero_flag, observed_data=trueData)
summary_creator.get_stats()
results[i]['data'] = summary_creator.d0
results[i]['stats'] = summary_creator.summary_stats
results[i]['autocorr'] = summary_creator.autocorrelation
results[i]['res_autocorr'] = summary_creator.residual_autocorrelation
return results
test_stats = summary_test()
df_stats = pd.DataFrame.from_dict({(i, j): test_stats[i]['stats'][j]
for i in test_stats.keys()
for j in test_stats[i]['stats'].keys()},
orient='index')
df_stats.index.names = ['Batch', 'Signal']
melt_df = df_stats.reset_index().melt(
id_vars=['Signal'], value_vars=['mean', 'variance', 'std'])
g = sns.FacetGrid(melt_df, col="variable", row="Signal", hue="Signal")
g = g.map(sns.distplot, 'value')
plt.show()
autocorr_formatted = [{'Batch': i, 'Signal': j, 'autocorr': v, 'lag': idx} for i in test_stats.keys(
) for j in test_stats[i]['autocorr'].keys() for idx, v in enumerate(test_stats[i]['autocorr'][j])]
df_ac = pd.DataFrame(autocorr_formatted)
res_autocorr_formatted = [{'Batch': i, 'Signal': j, 'res_autocorr': v, 'lag': idx} for i in test_stats.keys(
) for j in test_stats[i]['res_autocorr'].keys() for idx, v in enumerate(test_stats[i]['res_autocorr'][j])]
df_res_ac = pd.DataFrame(res_autocorr_formatted)
g = sns.FacetGrid(data=df_ac, col='Signal')
g.map(sns.lineplot, 'lag', 'autocorr')
plt.show()
g = sns.FacetGrid(data=df_res_ac, col='Signal')
g.map(sns.lineplot, 'lag', 'res_autocorr')
plt.show()
|
gpl-2.0
|
tgsmith61591/skutil
|
skutil/feature_selection/tests/test_select.py
|
1
|
10032
|
from __future__ import print_function
import numpy as np
import pandas as pd
import warnings
from skutil.odr import QRDecomposition
from skutil.feature_selection import combos
from numpy.testing import (assert_array_equal, assert_almost_equal, assert_array_almost_equal)
from sklearn.datasets import load_iris
from skutil.feature_selection import *
from skutil.testing import assert_fails
# Def data for testing
iris = load_iris()
X = pd.DataFrame.from_records(data=iris.data, columns=iris.feature_names)
y = np.array(
[[0.41144380, 1, 2],
[0.20002043, 1, 2],
[1.77615427, 1, 2],
[-0.88393494, 1, 2],
[1.03053577, 1, 2],
[0.10348028, 1, 2],
[-2.63301012, 1, 2],
[-0.09411449, 1, 2],
[-0.37090572, 1, 2],
[3.67912713, 1, 2],
[-1.11889106, 1, 2],
[-0.16339222, 1, 2],
[-1.68642994, 1, 2],
[0.01475935, 1, 2],
[-0.71178462, 1, 2],
[-0.07375506, 1, 2],
[1.67680864, 1, 2],
[1.08437155, 1, 2],
[0.42135106, 1, 2],
[0.23891404, 1, 2],
[-0.67025244, 1, 2],
[-0.74780315, 1, 2],
[1.53795249, 1, 2],
[2.24940846, 1, 2],
[-1.33077619, 1, 2],
[-1.23597935, 1, 2],
[-1.10603714, 1, 2],
[0.06115450, 1, 2],
[2.33540909, 1, 2],
[-0.20694138, 1, 2],
[1.34077119, 1, 2],
[1.19347871, 1, 2],
[0.23480672, 1, 2],
[-1.48948507, 1, 2],
[1.00529241, 1, 2],
[1.72366825, 1, 2],
[4.14722011, 1, 2],
[-0.66620106, 1, 2],
[1.45597498, 1, 2],
[-0.39631565, 1, 2],
[0.80971318, 1, 2],
[0.71547389, 1, 2],
[-0.17342195, 1, 2],
[-1.18399696, 1, 2],
[1.77178761, 1, 2],
[-0.94494203, 1, 2],
[-1.47486102, 1, 2],
[0.35748476, 1, 2],
[-1.29096329, 1, 2],
[0.61611613, 1, 2],
[0.92048145, 1, 2],
[0.56870638, 1, 2],
[0.06455932, 1, 2],
[0.20987525, 1, 2],
[0.60659611, 1, 2],
[0.43715853, 1, 2],
[-0.06136566, 1, 2],
[-1.75842912, 1, 2],
[-1.03648110, 1, 2],
[-2.72359130, 1, 2],
[1.80935039, 1, 2],
[1.27240976, 1, 2],
[-2.74477429, 1, 2],
[0.34654907, 1, 2],
[-1.90913461, 1, 2],
[-3.42357727, 1, 2],
[-1.28010016, 1, 2],
[3.17908952, 1, 2],
[-1.54936824, 1, 2],
[-1.37700148, 1, 2],
[0.41881648, 1, 2],
[0.22241198, 1, 2],
[-0.78960214, 1, 2],
[0.28105782, 1, 2],
[2.58817288, 1, 2],
[0.88948762, 1, 2],
[1.25544532, 1, 2],
[-0.50838470, 1, 2],
[1.13062450, 1, 2],
[2.41422771, 1, 2],
[-0.86262900, 1, 2],
[-2.16937438, 1, 2],
[-0.57198596, 1, 2],
[-0.07023331, 1, 2],
[2.34332545, 1, 2],
[-0.71221171, 1, 2],
[-0.18585408, 1, 2],
[-2.81586156, 1, 2],
[-0.86356504, 1, 2],
[-0.01727535, 1, 2],
[-3.15966711, 1, 2],
[-0.84387501, 1, 2],
[-1.73471525, 1, 2],
[2.74981014, 1, 2],
[0.28114847, 1, 2],
[-1.66076523, 1, 2],
[-0.62953126, 1, 2],
[-1.90627065, 1, 2],
[-0.38711584, 1, 2],
[0.84237942, 1, 2],
[0.35066088, 1, 2],
[-0.47789289, 1, 2],
[-1.72405119, 1, 2],
[0.78935913, 1, 2],
[3.03339661, 1, 2],
[-2.68912845, 1, 2],
[0.22600963, 1, 2],
[3.72403170, 1, 2],
[0.25115682, 1, 2],
[2.51450226, 1, 2],
[-2.52882830, 1, 2],
[-1.60614569, 1, 2],
[-0.74095083, 1, 2],
[0.78927670, 1, 2],
[2.35876839, 1, 2],
[0.84019398, 1, 2],
[-2.49124992, 1, 2],
[-1.36854708, 1, 2],
[0.59393289, 1, 2],
[-0.82345534, 1, 2],
[1.16502458, 1, 2],
[-0.28916165, 1, 2],
[0.56981198, 1, 2],
[1.26863563, 1, 2],
[-2.88717380, 1, 2],
[0.01525054, 1, 2],
[-1.62951432, 1, 2],
[0.45031432, 1, 2],
[0.75238069, 1, 2],
[0.73113016, 1, 2],
[1.52144045, 1, 2],
[0.54123604, 1, 2],
[-3.18827503, 1, 2],
[-0.31185831, 1, 2],
[0.77786948, 1, 2],
[0.96769255, 1, 2],
[2.01435274, 1, 2],
[-0.86995262, 1, 2],
[1.63125106, 1, 2],
[-0.49056004, 1, 2],
[-0.17913921, 1, 2],
[1.55363112, 1, 2],
[-1.83564770, 1, 2],
[-1.22079526, 1, 2],
[-1.69420452, 1, 2],
[0.54327665, 1, 2],
[-2.07883607, 1, 2],
[0.52608135, 1, 2],
[-0.89157428, 1, 2],
[-1.07971739, 1, 2]])
Z = pd.DataFrame.from_records(data=y, columns=['A', 'B', 'C'])
def test_feature_dropper():
transformer = FeatureDropper().fit(X)
assert not transformer.cols
assert transformer.transform(X).shape[1] == 4
assert FeatureDropper(['sepal length (cm)', 'sepal width (cm)']).fit_transform(X).shape[1] == 2
# test the selective mixin
assert transformer.cols is None
def test_feature_selector():
transformer = FeatureRetainer().fit(X)
assert transformer.transform(X).shape[1] == 4
cols = ['sepal length (cm)', 'sepal width (cm)']
transformer = FeatureRetainer(cols=cols).fit(X)
assert transformer.transform(X).shape[1] == 2
# test the selective mixin
assert isinstance(transformer.cols, list)
def test_multi_collinearity():
transformer = MulticollinearityFilterer()
# Test fit_transform
x = transformer.fit_transform(X)
assert x.shape[1] == 3
col_nms = x.columns
assert col_nms[0] == 'sepal length (cm)'
assert col_nms[1] == 'sepal width (cm)'
assert col_nms[2] == 'petal width (cm)'
assert len(transformer.drop_) == 1
assert len(transformer.mean_abs_correlations_) == 1
print(transformer.correlations_) # the correlations...
# test the selective mixin
assert transformer.cols is None, 'expected None but got %s' % str(transformer.cols)
# Test fit, then transform
transformer = MulticollinearityFilterer().fit(X)
x = transformer.transform(X)
assert x.shape[1] == 3
col_nms = x.columns
assert col_nms[0] == 'sepal length (cm)'
assert col_nms[1] == 'sepal width (cm)'
assert col_nms[2] == 'petal width (cm)'
assert len(transformer.drop_) == 1
# Check as_df false
transformer.as_df = False
assert isinstance(transformer.transform(X), np.ndarray)
# check 1.0
transformer = MulticollinearityFilterer(threshold=1.0).fit(X)
assert not transformer.drop_
# make sure non-square will fail
assert_fails(filter_collinearity, ValueError, pd.DataFrame.from_records(np.ones((3, 2))), 0.6)
def test_nzv_filterer():
transformer = NearZeroVarianceFilterer().fit(X)
assert not transformer.drop_
z = X.copy()
z['zeros'] = np.zeros(150)
transformer = NearZeroVarianceFilterer().fit(z)
assert len(transformer.drop_) == 1
assert transformer.drop_[0] == 'zeros'
assert transformer.transform(z).shape[1] == 4
# test the selective mixin
assert transformer.cols is None, 'expected None but got %s' % str(transformer.cols)
# see what happens if we have a nan or inf in the mix:
a = pd.DataFrame.from_records(data=np.reshape(np.arange(25), (5, 5)))
a.iloc[0, 0] = np.inf
a.iloc[0, 1] = np.nan
# expect a ValueError
assert_fails(NearZeroVarianceFilterer().fit, ValueError, a)
# test with the ratio strategy
transformer = NearZeroVarianceFilterer(strategy='ratio', threshold=0.1)
assert_fails(transformer.fit, ValueError, z) # will fail because thresh must be greater than 1.0
x = np.array([
[1, 2, 3],
[1, 5, 3],
[1, 2, 4],
[2, 5, 4]
])
df = pd.DataFrame.from_records(data=x, columns=['a', 'b', 'c'])
transformer = NearZeroVarianceFilterer(strategy='ratio', threshold=3.0).fit(df)
assert len(transformer.drop_) == 1
assert transformer.drop_[0] == 'a'
assert len(transformer.var_) == 1
assert transformer.var_['a'] == 3.0
def test_feature_dropper_warning():
x = np.array([
[1, 2, 3],
[1, 2, 3],
[1, 2, 4]
])
df = pd.DataFrame.from_records(data=x, columns=['a', 'b', 'c'])
# catch the warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
FeatureDropper(cols=['d']).fit_transform(df)
assert len(w) == 1
def test_linear_combos():
lcf = LinearCombinationFilterer().fit(Z)
assert_array_equal(lcf.drop_, ['C'])
z = lcf.transform(Z)
assert_array_equal(z.columns.values, ['A', 'B'])
assert (z.B == 1).all()
# test on no linear combos
lcf = LinearCombinationFilterer(cols=['A', 'B']).fit(Z)
assert not lcf.drop_
assert Z.equals(lcf.transform(Z))
# test too few features
assert_fails(LinearCombinationFilterer(cols=['A']).fit, ValueError, Z)
def test_sparsity():
x = np.array([
[1, 2, 3],
[1, np.nan, np.nan],
[1, 2, np.nan]
])
df = pd.DataFrame.from_records(data=x, columns=['a', 'b', 'c'])
# test at .33 level
filt = SparseFeatureDropper(threshold=0.3).fit(df)
assert len(filt.drop_) == 2
assert all([i in filt.drop_ for i in ('b', 'c')]), 'expected "b" and "c" but got %s' % ', '.join(filt.drop_)
assert isinstance(filt.drop_, list)
# test at 2/3 level
filt = SparseFeatureDropper(threshold=0.6).fit(df)
assert len(filt.drop_) == 1
assert 'c' in filt.drop_, 'expected "c" but got %s' % filt.drop_
# test with a bad value
assert_fails(SparseFeatureDropper(threshold=1.0).fit, ValueError, df)
assert_fails(SparseFeatureDropper(threshold=-0.1).fit, ValueError, df)
assert_fails(SparseFeatureDropper(threshold='a').fit, ValueError, df)
# only try on the 'a' col
filt = SparseFeatureDropper(cols=['a']).fit(df)
assert not filt.drop_
def test_enum_lc():
z = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12]
])
a, b = combos._enum_lc(QRDecomposition(z))[0], np.array([2, 0, 1])
assert (a == b).all(), 'should be [2,0,1] but got %s' % a
assert not combos._enum_lc(QRDecomposition(iris.data))
assert_array_equal(combos._enum_lc(QRDecomposition(y))[0], np.array([2, 1]))
|
bsd-3-clause
|
rahuldhote/scikit-learn
|
examples/linear_model/plot_lasso_coordinate_descent_path.py
|
254
|
2639
|
"""
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
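# Sanity aside (not part of the original example): with positive=True the
# coordinate descent constrains coefficients to be non-negative, so both
# positive paths should contain no negative values.
assert (coefs_positive_lasso >= 0).all() and (coefs_positive_enet >= 0).all()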
|
bsd-3-clause
|
gchild320/flounder
|
scripts/tracing/dma-api/plotting.py
|
96
|
4043
|
"""Ugly graph drawing tools"""
import matplotlib.pyplot as plt
import matplotlib.cm as cmap
#import numpy as np
from matplotlib import cbook
# http://stackoverflow.com/questions/4652439/is-there-a-matplotlib-equivalent-of-matlabs-datacursormode
class DataCursor(object):
"""A simple data cursor widget that displays the x,y location of a
matplotlib artist when it is selected."""
def __init__(self, artists, tolerance=5, offsets=(-20, 20),
template='x: %0.2f\ny: %0.2f', display_all=False):
"""Create the data cursor and connect it to the relevant figure.
"artists" is the matplotlib artist or sequence of artists that will be
selected.
"tolerance" is the radius (in points) that the mouse click must be
within to select the artist.
"offsets" is a tuple of (x,y) offsets in points from the selected
point to the displayed annotation box
"template" is the format string to be used. Note: For compatibility
with older versions of python, this uses the old-style (%)
formatting specification.
"display_all" controls whether more than one annotation box will
be shown if there are multiple axes. Only one will be shown
per-axis, regardless.
"""
self.template = template
self.offsets = offsets
self.display_all = display_all
if not cbook.iterable(artists):
artists = [artists]
self.artists = artists
self.axes = tuple(set(art.axes for art in self.artists))
self.figures = tuple(set(ax.figure for ax in self.axes))
self.annotations = {}
for ax in self.axes:
self.annotations[ax] = self.annotate(ax)
for artist in self.artists:
artist.set_picker(tolerance)
for fig in self.figures:
fig.canvas.mpl_connect('pick_event', self)
def annotate(self, ax):
"""Draws and hides the annotation box for the given axis "ax"."""
annotation = ax.annotate(self.template, xy=(0, 0), ha='right',
xytext=self.offsets, textcoords='offset points', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')
)
annotation.set_visible(False)
return annotation
def __call__(self, event):
"""Intended to be called through "mpl_connect"."""
# Rather than trying to interpolate, just display the clicked coords
# This will only be called if it's within "tolerance", anyway.
x, y = event.mouseevent.xdata, event.mouseevent.ydata
try:
annotation = self.annotations[event.artist.axes]
except KeyError:
return
if x is not None:
if not self.display_all:
# Hide any other annotation boxes...
for ann in self.annotations.values():
ann.set_visible(False)
# Update the annotation in the current axis..
annotation.xy = x, y
annotation.set_text(self.template % (x, y))
annotation.set_visible(True)
event.canvas.draw()
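# Minimal usage sketch for DataCursor (an illustration, not from the original
# script): attach it to one or more line artists and click near a point to see
# the annotation.
def _datacursor_demo():
    fig_demo, ax_demo = plt.subplots()
    line_demo, = ax_demo.plot(range(10), 'ro-')
    DataCursor(line_demo)
    plt.show()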
def plotseries(*serieslabels):
"""Plot lists of series in separate axes, tie time axis together"""
global fig
fig, axes = plt.subplots(nrows=len(serieslabels), sharex=True)
for subplot, ax in zip(serieslabels, axes):
for ser, lab in zip(*subplot): # subplot = ([x], [y])
ax.step(ser[0], ser[1], label=lab, where="post")
ax.grid(True)
ax.legend()
(DataCursor(ax.lines))
plt.grid(True)
plt.show()
def disp_pic(bitmap):
"""Display the allocation bitmap. TODO."""
fig=plt.figure()
a=fig.add_subplot(1,1,1)
fig.clf()
implt=plt.imshow(bitmap, extent=(0, len(bitmap[0]), 0, len(bitmap)),
interpolation="nearest", cmap=cmap.gist_heat)
fig.canvas.draw()
plt.show()
|
gpl-2.0
|
ronaldahmed/SLAM-for-ugv
|
neural-navigation-with-lstm/MARCO/Utility/logistic_regression.py
|
2
|
9820
|
## Automatically adapted for numpy.oldnumeric May 17, 2011 by -c
"""
Python module for computing Logistic Regression.
Requires numarray or Numeric.
Version: 20050711
Contact: Jeffrey Whitaker <[email protected]>
This code is released into the Public Domain as is.
No support or warranty is provided. Comments, bug reports
and enhancements are welcome.
"""
_use_numarray=False
try:
import numarray as NA
import numarray.linear_algebra as LA
_use_numarray = True
except:
try:
import numpy.oldnumeric as NA
import numpy.oldnumeric.linear_algebra as LA
except:
raise ImportError, 'Requires numarray or Numeric'
def _simple_logistic_regression(x,y,beta_start=None,verbose=False,
CONV_THRESH=1.e-3,MAXIT=500):
"""
Faster than logistic_regression when there is only one predictor.
"""
if len(x) != len(y):
raise ValueError, "x and y should be the same length!"
if beta_start is None:
beta_start = NA.zeros(2,x.dtype.char)
iter = 0; diff = 1.; beta = beta_start # initial values
if verbose:
print 'iteration beta log-likliehood |beta-beta_old|'
while iter < MAXIT:
beta_old = beta
p = NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likliehood
s = NA.array([NA.sum(y-p), NA.sum((y-p)*x)]) # scoring function
# information matrix
J_bar = NA.array([[NA.sum(p*(1-p)),NA.sum(p*(1-p)*x)],
[NA.sum(p*(1-p)*x),NA.sum(p*(1-p)*x*x)]])
beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta
diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences
if verbose:
print iter+1, beta, l, diff
if diff <= CONV_THRESH: break
iter = iter + 1
return beta, J_bar, l
def logistic_regression(x,y,beta_start=None,verbose=False,CONV_THRESH=1.e-3,
MAXIT=500):
"""
Uses the Newton-Raphson algorithm to calculate a maximum
likelihood estimate logistic regression.
The algorithm is known as 'iteratively re-weighted least squares', or IRLS.
x - rank-1 or rank-2 array of predictors. If x is rank-2,
the number of predictors = x.shape[0] = N. If x is rank-1,
it is assumed N=1.
y - binary outcomes (if N>1 len(y) = x.shape[1], if N=1 len(y) = len(x))
beta_start - initial beta vector (default zeros(N+1,x.dtype.char))
if verbose=True, diagnostics printed for each iteration (default False).
MAXIT - max number of iterations (default 500)
CONV_THRESH - convergence threshold (sum of absolute differences
of beta-beta_old, default 0.001)
returns beta (the logistic regression coefficients, an N+1 element vector),
J_bar (the (N+1)x(N+1) information matrix), and l (the log-likelihood).
J_bar can be used to estimate the covariance matrix and the standard
error for beta.
l can be used for a chi-squared significance test.
covmat = inverse(J_bar) --> covariance matrix of coefficents (beta)
stderr = sqrt(diag(covmat)) --> standard errors for beta
deviance = -2l --> scaled deviance statistic
chi-squared value for -2l is the model chi-squared test.
"""
if x.shape[-1] != len(y):
raise ValueError, "x.shape[-1] and y should be the same length!"
try:
N, npreds = x.shape[1], x.shape[0]
except: # single predictor, use simple logistic regression routine.
return _simple_logistic_regression(x,y,beta_start=beta_start,
CONV_THRESH=CONV_THRESH,MAXIT=MAXIT,verbose=verbose)
if beta_start is None:
beta_start = NA.zeros(npreds+1,x.dtype.char)
X = NA.ones((npreds+1,N), x.dtype.char)
X[1:, :] = x
Xt = NA.transpose(X)
iter = 0; diff = 1.; beta = beta_start # initial values
if verbose:
print 'iteration beta log-likliehood |beta-beta_old|'
while iter < MAXIT:
beta_old = beta
ebx = NA.exp(NA.dot(beta, X))
p = ebx/(1.+ebx)
l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likeliehood
s = NA.dot(X, y-p) # scoring function
J_bar = NA.dot(X*p,Xt) # information matrix
beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta
diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences
if verbose:
print iter+1, beta, l, diff
if diff <= CONV_THRESH: break
iter = iter + 1
if iter == MAXIT and diff > CONV_THRESH:
print 'warning: convergence not achieved with threshold of %s in %s iterations' % (CONV_THRESH,MAXIT)
return beta, J_bar, l
def calcprob(beta, x):
"""
calculate probabilities (in percent) given beta and x
"""
try:
N, npreds = x.shape[1], x.shape[0]
except: # single predictor, x is a vector, len(beta)=2.
N, npreds = len(x), 1
if len(beta) != npreds+1:
raise ValueError,'sizes of beta and x do not match!'
if npreds==1: # simple logistic regression
return 100.*NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
X = NA.ones((npreds+1,N), x.dtype.char)
X[1:, :] = x
ebx = NA.exp(NA.dot(beta, X))
return 100.*ebx/(1.+ebx)
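# Sanity sketch (not part of the original module): with a single predictor,
# calcprob should trace the logistic curve 100*exp(b0+b1*x)/(1.+exp(b0+b1*x)).
def _calcprob_sanity_check():
    beta_demo = NA.array([0.0, 1.0])
    x_demo = NA.array([-2.0, 0.0, 2.0])
    # expected roughly [11.9, 50.0, 88.1] percent
    return calcprob(beta_demo, x_demo)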
if __name__ == '__main__':
# this example uses three correlated time series drawn from
# a trivariate normal distribution. The first is taken to be the
# observations, the other two are considered to be forecasts
# of the observations. For example, the observations could
# be the temperature in Boulder, and the other two could
# be forecasts of temperature from two different weather prediction
# models. A logistic regression is used to compute
# the conditional probability that the observation will be greater
# than zero given the forecasts.
if _use_numarray:
from numarray.random_array import multivariate_normal
import numarray.mlab as mlab
else:
from numpy.oldnumeric.random_array import multivariate_normal
import numpy.oldnumeric.mlab as mlab
# number of realizations.
nsamps = 100000
# correlations
r12 = 0.5 # average correlation between the first predictor and the obs.
r13 = 0.25 # avg correlation between the second predictor and the obs.
r23 = 0.125 # avg correlation between predictors.
# random draws from trivariate normal distribution
x = multivariate_normal(NA.array([0,0,0]),NA.array([[1,r12,r13],[r12,1,r23],[r13,r23,1]]), nsamps)
x2 = multivariate_normal(NA.array([0,0,0]),NA.array([[1,r12,r13],[r12,1,r23],[r13,r23,1]]), nsamps)
print 'correlations (r12,r13,r23) = ',r12,r13,r23
print 'number of realizations = ',nsamps
# training data.
obs = x[:,0]
climprob = NA.sum((obs > 0).astype('f'))/nsamps
fcst = NA.transpose(x[:,1:]) # 2 predictors.
obs_binary = obs > 0.
# independent data for verification.
obs2 = x2[:,0]
fcst2 = NA.transpose(x2[:,1:])
# compute logistic regression.
beta,Jbar,llik = logistic_regression(fcst,obs_binary,verbose=True)
covmat = LA.inverse(Jbar)
stderr = NA.sqrt(mlab.diag(covmat))
print 'beta =' ,beta
print 'standard error =',stderr
# forecasts from independent data.
prob = calcprob(beta, fcst2)
# compute Brier Skill Score
verif = (obs2 > 0.).astype('f')
bs = mlab.mean((0.01*prob - verif)**2)
bsclim = mlab.mean((climprob - verif)**2)
bss = 1.-(bs/bsclim)
print 'Brier Skill Score (should be within +/- 0.1 of 0.18) = ',bss
# calculate reliability.
# see http://www.bom.gov.au/bmrc/wefor/staff/eee/verif/verif_web_page.html
# for information on the Brier Skill Score and reliability diagrams.
totfreq = NA.zeros(10,'f')
obfreq = NA.zeros(10,'f')
for icat in range(10):
prob1 = icat*10.
prob2 = (icat+1)*10.
test1 = prob > prob1
test2 = prob <= prob2
testf = 1.0*test1*test2
testfv = verif*testf
totfreq[icat] = NA.sum(testf)
obfreq[icat] = NA.sum(testfv)
fcstprob = NA.zeros(10,'f')
reliability = NA.zeros(10,'f')
frequse = NA.zeros(10,'f')
print 'fcst prob, reliability, frequency of use'
for icat in range(10):
prob1 = icat*10.
prob2 = (icat+1)*10.
fcstprob[icat] = 0.5*(prob1+prob2)
reliability[icat]=1.e20
if totfreq[icat] > nsamps/1000.:
reliability[icat] = 100.*obfreq[icat]/totfreq[icat]
frequse[icat] = 100.*totfreq[icat]/nsamps
        print fcstprob[icat],reliability[icat],frequse[icat]
# plot reliability diagram if matplotlib installed.
try:
from pylab import *
doplot = True
except:
doplot = False
if doplot:
from matplotlib.numerix import ma
reliability = ma.masked_values(reliability, 1.e20)
fig=figure(figsize=(6.5,6.5))
ax = fig.add_axes([0.1,0.1,0.8,0.8])
plot(fcstprob,reliability,'bo-')
plot(arange(0,110,10),arange(0,110,10),'r--')
xlabel('forecast probability')
ylabel('observed frequency')
title('Reliability Diagram')
text(55,15,'Brier Skill Score = %4.2f' % bss,fontsize=14)
ax2 = fig.add_axes([.2, .6, .25, .2], axisbg='y')
bar(10*arange(10), frequse, width=10)
xlabel('forecast probability',fontsize=10)
ylabel('percent issued',fontsize=10)
title('Frequency of Use',fontsize=12)
ax2.set_xticklabels(arange(20,120,20),fontsize=9)
ax2.set_yticklabels(arange(20,120,20),fontsize=9)
ax.set_xticks(arange(5,100,5)); ax.set_yticks(arange(5,100,5))
ax.grid(True)
print 'saving reliability diagram ...'
savefig('reliability')
|
mit
|
cactusbin/nyt
|
matplotlib/examples/pylab_examples/multiline.py
|
12
|
1224
|
#!/usr/bin/env python
from pylab import *
#from matplotlib.pyplot import *
#from numpy import arange
if 1:
figure(figsize=(7, 4))
ax = subplot(121)
ax.set_aspect(1)
plot(arange(10))
xlabel('this is a xlabel\n(with newlines!)')
ylabel('this is vertical\ntest', multialignment='center')
#ylabel('this is another!')
text(2, 7,'this is\nyet another test',
rotation=45,
horizontalalignment = 'center',
verticalalignment = 'top',
multialignment = 'center')
grid(True)
subplot(122)
text(0.29, 0.7, "Mat\nTTp\n123", size=18,
va="baseline", ha="right", multialignment="left",
bbox=dict(fc="none"))
text(0.34, 0.7, "Mag\nTTT\n123", size=18,
va="baseline", ha="left", multialignment="left",
bbox=dict(fc="none"))
text(0.95, 0.7, "Mag\nTTT$^{A^A}$\n123", size=18,
va="baseline", ha="right", multialignment="left",
bbox=dict(fc="none"))
xticks([0.2, 0.4, 0.6, 0.8, 1.],
["Jan\n2009","Feb\n2009","Mar\n2009", "Apr\n2009", "May\n2009"])
axhline(0.7)
title("test line spacing for multiline text")
subplots_adjust(bottom=0.25, top=0.8)
draw()
show()
|
unlicense
|
reidbradley/prospecting
|
prospecting/api.py
|
1
|
28045
|
from __future__ import print_function
import httplib2
import os
import json
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
import pandas as pd
from prospecting.env import (PROJECTNAME,
CREDSDIR,
CLIENT_SECRET_FILE,
DATADIR,
NOAUTH_LOCAL_WEBSERVER
)
try:
import argparse
parser = argparse.ArgumentParser(parents=[tools.argparser])
parser.set_defaults(noauth_local_webserver=NOAUTH_LOCAL_WEBSERVER)
flags = parser.parse_known_args()[0]
except ImportError:
flags = None
import logging
log = logging.getLogger('prospecting.api')
class GoogleApi:
"""Base class for interfacing with Google APIs
https://developers.google.com/apis-explorer/
"""
def __init__(self, apiname, apiversion, scopelist):
"""Initialize GoogleApi base class
Args:
apiname (str): Name of Google API, example: 'sheets'
apiversion (str): Version of API, example: 'v4'
scopelist (list): List of authorization scopes, example: []
"""
self.api_name = apiname
self.api_version = apiversion
self.api_id = (self.api_name + ":" + self.api_version)
self.api_scope = scopelist
#self.discovery_url = ('https://' + self.api_name + '.googleapis.com/$discovery/rest?'
# 'version=' + self.api_version)
self.discovery_url = ('https://www.googleapis.com/discovery/v1/apis/' + self.api_name +
'/' + self.api_version + '/rest')
self.api_info = self._discover_api(self.discovery_url)
def authenticate(self):
log.info('Authenticating...{0}, {1}'.format(self.api_name, self.api_version))
self.credentials = self._get_credentials(self.api_scope)
self.http = self.credentials.authorize(httplib2.Http())
service = self._build_service_object()
log.info('Successfully authenticated...{0}, {1}'.format(self.api_name, self.api_version))
return service
def reauthenticate(self, scopelist):
if os.path.isfile(self.credential_path):
os.remove(self.credential_path)
self.api_scope = scopelist
self.authenticate()
def _get_credentials(self, scopelist):
log.info('Getting credentials...')
credsfile = ('googleapis.' + self.api_name + '.' + PROJECTNAME + '.json')
self.credential_path = os.path.join(CREDSDIR, credsfile)
self.store = Storage(self.credential_path)
file_exists = os.path.isfile(self.credential_path)
scopes_match = False
if file_exists:
with open(self.credential_path) as f:
credjson = json.load(f)
scopes_match = set(credjson['scopes']) == set(scopelist)
if scopes_match:
creds = self.store.get()
else:
creds = None
if (not creds or creds.invalid):
creds = self._run_credentials_flow()
return creds
def _run_credentials_flow(self):
log.info('Running credentials flow...')
secretspath = os.path.join(CREDSDIR, CLIENT_SECRET_FILE)
flow = client.flow_from_clientsecrets(secretspath, self.api_scope)
flow.user_agent = PROJECTNAME
if flags or flags is None:
self.credentials = tools.run_flow(flow, self.store, flags)
else: # Needed only for compatibility with Python 2.6
            self.credentials = tools.run(flow, self.store)
log.info('Storing credentials to {0}'.format(self.credential_path))
return self.credentials
def _build_service_object(self):
log.info('Building service object...')
service_object = discovery.build(self.api_name,
self.api_version,
http=self.http,
discoveryServiceUrl=self.discovery_url)
log.info('Service object built...{0}'.format(service_object))
return service_object
def _discover_api(self, discoveryurl):
discovery_file = os.path.join(DATADIR,
'discoveryapi_' + self.api_name + '.json')
if os.path.isfile(discovery_file):
log.info(('Reading discovery file for {0}').format(self.api_id))
with open(discovery_file) as f:
disco_info = json.load(f)
else:
h = httplib2.Http()
resp, content = h.request(discoveryurl, 'GET')
log.info(('Resp from 1st discoveryurl attempt: {0}'.format(resp['status'])))
if resp['status'] == '404':
DISCOVERY_URI = 'https://www.googleapis.com/discovery/v1/apis?preferred=true'
resp2, content2 = h.request(DISCOVERY_URI, 'GET')
disco_all = json.loads(content2.decode())
disco_api = [apiinfo for apiinfo in disco_all['items'] for k, v in apiinfo.items() if v == self.api_id][0]
self.discovery_url = disco_api['discoveryRestUrl']
resp, content = h.request(self.discovery_url, 'GET')
if resp['status'] == '404':
try:
raise Exception(resp['status'])
except Exception as e:
log.error('Error response in 2nd discoveryurl attempt: {0}'.format(e))
assert resp['status'] != '404'
else:
disco_info = json.loads(content.decode())
print(disco_info)
log.info(('Resp from 2nd discoveryurl attempt: {0}'.format(resp['status'])))
disco_info = json.loads(content.decode())
log.info(('Writing discovery file for {0}').format(self.api_id))
with open(discovery_file, 'w') as outfile:
json.dump(json.loads(content.decode()), outfile)
log.info('Read from api, write to file complete. Check new file in' + discovery_file)
return disco_info
class SheetsApi(GoogleApi):
"""Class for SheetsApi object
https://developers.google.com/resources/api-libraries/documentation/sheets/v4/python/latest/
https://developers.google.com/apis-explorer/#p/sheets/v4/
"""
def __init__(self,
apiname='sheets',
apiversion='v4',
spreadsheetid=None,
sheetrange=None,
scopelist=['https://www.googleapis.com/auth/spreadsheets.readonly',
'https://www.googleapis.com/auth/drive.readonly']):
self.spreadsheet_id = spreadsheetid
self.sheet_range = sheetrange
self.info = None # reserved for metadata
self.sheets = {} # store data from get requests
GoogleApi.__init__(self, apiname, apiversion, scopelist)
pass
def authenticate(self):
self.service = GoogleApi.authenticate(self)
def get_ss_info(self, sheetranges=None, includegriddata=False):
"""Returns the spreadsheet from a given ID
Params:
            sheetranges (list): List of comma-separated range names as strings, Ex: ['Sheet1', 'Sheet2!A1:B5']
includegriddata (bool): True if grid data should be returned, Ex: True
Returns:
{
"spreadsheetId": "1MdZzXvqftMJTfLdbBpzYJA42kCv9R6SSEAT5tSUNe5g",
"properties": {
...
},
"sheets": [
{
"properties": {
...
},
...
}
],
"spreadsheetUrl": "https://docs.google.com/spreadsheets/d/.../edit"
}
"""
spreadsheetid = self.spreadsheet_id
if sheetranges is None:
if spreadsheetid is None:
raise ValueError('Please set self.spreadsheet_id')
response = self.service.spreadsheets().get(
spreadsheetId=spreadsheetid,
includeGridData=includegriddata
).execute()
log.info('Spreadsheet loaded.')
log.info('Sheets include: {0}'.format([sheet['properties']['title'] for sheet in response['sheets']]))
return response
else:
response = self.service.spreadsheets().get(
spreadsheetId=spreadsheetid,
ranges=sheetranges,
includeGridData=includegriddata
).execute()
return response
def get(self,
sheetrange=None,
asdataframe=True,
headerrow=0,
majordimension='ROWS',
valuerenderoption='FORMATTED_VALUE',
datetimerenderoption='SERIAL_NUMBER'):
"""Returns one range of values from a spreadsheet.
Params:
sheetrange (str): Name of range to get, Ex: ['Sheet1']
asdataframe (bool): Flag to determine response type, Ex: False
headerrow (int): Specifies location of header, Ex: 2
majordimension (str): The major dimension that results should use, Ex 'COLUMNS'
valuerenderoption (str): How values should be represented in the output, Ex: 'UNFORMATTED_VALUE'
datetimerenderoption (str): How dates, times, and durations should be represented in the output,
Ex: 'FORMATTED_STRING'
Returns:
Google Sheet as requested
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get
"""
tmpdf = pd.DataFrame()
spreadsheetid = self.spreadsheet_id
if spreadsheetid is None:
raise ValueError('Please set self.spreadsheet_id')
if not sheetrange:
sheetrange = self.sheet_range
self.response = self.service.spreadsheets().values().get(
spreadsheetId=spreadsheetid,
range=sheetrange,
majorDimension=majordimension,
valueRenderOption=valuerenderoption,
dateTimeRenderOption=datetimerenderoption
).execute()
values = self.response.get('values', None)
if not values:
log.info('No data found.')
tmpdf = None
else:
if headerrow is not None:
if asdataframe is True:
try:
tmpdf = pd.DataFrame.from_records(values[(headerrow + 1):len(values)],
columns=values[headerrow])
except AssertionError as err:
print('AssertionError: {0}'.format(err))
print('No columns in headerrow. Add columns to sheet or pass headerrow=None.')
print('Check self.data for malformed response (no columns set).')
else:
tmpdf = values[(headerrow + 1)]
else:
if asdataframe is True:
tmpdf = pd.DataFrame.from_records(values[0:len(values)])
else:
tmpdf = values[0:len(values)]
return (tmpdf)
def batchGet(self,
sheetranges,
majordimension='ROWS',
valuerenderoption='FORMATTED_VALUE',
datetimerenderoption='SERIAL_NUMBER'):
"""Returns one or more ranges of values from a spreadsheet.
Params:
            sheetranges (list): List of comma-separated range names as strings, Ex: ['Sheet1', 'Sheet2!A1:B5']
majordimension (str): The major dimension that results should use, Ex 'COLUMNS'
valuerenderoption (str): How values should be represented in the output, Ex: 'UNFORMATTED_VALUE'
datetimerenderoption (str): How dates, times, and durations should be represented in the output,
Ex: 'FORMATTED_STRING'
Returns:
List of tuples, one tuple for each range requested, Ex: [('col_1, col_2), ]
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet
"""
spreadsheetid = self.spreadsheet_id
if spreadsheetid is None:
raise ValueError('Please set self.spreadsheet_id')
if not sheetranges:
sheetranges = self.sheet_range
self.response = self.service.spreadsheets().values().batchGet(
spreadsheetId=spreadsheetid,
ranges=sheetranges,
majorDimension=majordimension,
valueRenderOption=valuerenderoption,
dateTimeRenderOption=datetimerenderoption
).execute()
values = {vr['range']: vr.get('values', []) for vr in self.response['valueRanges']}
if not values:
print('No data found.')
return {k: v for k, v in values.items()}
def update(self,
dataframe,
sheetrange,
majordimension='ROWS',
valueinputoption='RAW',
includevaluesinresponse=False,
responsevaluerenderoption='FORMATTED_VALUE',
responsedatetimerenderoption='SERIAL_NUMBER'):
"""Sets values in a range of a spreadsheet
Params:
sheetrange (str): Name of range to get,
Ex: ['Sheet1']
valueinputoption (str): How the input data should be interpreted,
Ex: 'USER_ENTERED'
includevaluesinresponse (bool): Determines if the update response should include
the values of the cells that were updated,
Ex. False
responsevaluerenderoption (str): Determines how values in the response
should be rendered,
Ex: 'UNFORMATTED_VALUE'
responsedatetimerenderoption (str): Determines how dates, times, and durations
in the response should be rendered,
Ex: 'FORMATTED_STRING'
Returns:
response, returns "UpdateValuesResponse" in format:
{
"spreadsheetId": string,
"updatedRange": string,
"updatedRows": number,
"updatedColumns": number,
"updatedCells": number,
"updatedData": {
object(ValueRange)
},
}
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update
"""
spreadsheetid = self.spreadsheet_id
data = {
"range": sheetrange,
"majorDimension": majordimension,
"values":
[(dataframe.columns.values.tolist())] + (dataframe.values.tolist())
}
self.response = self.service.spreadsheets().values().update(
spreadsheetId=spreadsheetid,
range=sheetrange,
valueInputOption=valueinputoption,
#includeValuesInResponse=includevaluesinresponse,
#responseValueRenderOption=responsevaluerenderoption,
#responseDateTimeRenderOption=responsedatetimerenderoption,
body=data
).execute()
if not self.response:
log.info('Update Failed!')
else:
log.info('Update Successful!')
def batchUpdate(self,):
pass
def clear(self):
pass
def batchClear(self):
pass
def append(self,
dataframe,
sheetrange,
majordimension='ROWS',
valueinputoption='RAW',
insertdataoption='INSERT_ROWS',
includevaluesinresponse=False,
responsevaluerenderoption='FORMATTED_VALUE',
responsedatetimerenderoption='SERIAL_NUMBER'):
"""Append values to a spreadsheet
Params:
sheetrange (str): The A1 notation of a range to search for a logical table of data.
Values will be appended after the last row of the table,
Ex: 'Sheet1'
valueinputoption (str): How the input data should be interpreted, Ex: 'USER_ENTERED'
insertdataoption (str): How the input data should be inserted, Example 'OVERWRITE'
includevaluesinresponse (bool): Determines if the update response should
include the values of the cells that were appended, Ex: False
responsevaluerenderoption (str): Determines how values in the response should be rendered,
Ex: 'UNFORMATTED_VALUE'
responsedatetimerenderoption (str): Determines how dates, times, and durations in the response
should be rendered,
Ex: 'FORMATTED_STRING'
Returns:
response, returns response body in format:
{
"spreadsheetId": string,
"tableRange": string,
"updates": {
object(UpdateValuesResponse)
},
}
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/append
"""
spreadsheetid = self.spreadsheet_id
data = {
"range": sheetrange,
"majorDimension": majordimension,
"values": dataframe.values.tolist()
#[(dataframe.columns.values.tolist())] + (dataframe.values.tolist())
}
self.response = self.service.spreadsheets().values().append(
spreadsheetId=spreadsheetid,
range=sheetrange,
valueInputOption='RAW',
body=data
).execute()
if not self.response:
log.info('No data found.')
else:
log.info('Append Successful!')
def extract_sheet_names(self):
pass
def load_sheets(self, sheetslist, batch=None):
data = {}
if batch is None:
batch = self.batchGet(sheetslist)
for s in sheetslist:
tmp = [value for key, value in batch.items() if s in key][0]
if tmp is None:
data[s] = tmp
else:
try:
data[s] = pd.DataFrame.from_records(tmp[1:len(tmp[1])],
columns=tmp[0][0:len(tmp[1])])
except:
log.warning('Failed to load dataframe, returning tmp')
data[s] = tmp
return (data)
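# Hedged usage sketch (not part of the original module): the spreadsheet id and
# ranges below are placeholders; the sketch only illustrates the intended call
# order for SheetsApi -- construct, authenticate, then read ranges.
def _example_sheets_usage(spreadsheet_id='YOUR_SPREADSHEET_ID'):
    sheets = SheetsApi(spreadsheetid=spreadsheet_id, sheetrange='Sheet1!A1:D50')
    sheets.authenticate()                      # builds sheets.service
    df = sheets.get(asdataframe=True)          # header row becomes the column names
    batches = sheets.batchGet(['Sheet1!A1:B5', 'Sheet2'])
    return df, batches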
class DriveApi(GoogleApi):
"""Class for DriveApi object
https://developers.google.com/resources/api-libraries/documentation/drive/v3/python/latest/
https://developers.google.com/apis-explorer/#p/drive/v3/
"""
def __init__(self,
apiname='drive',
apiversion='v3',
scopelist=['https://www.googleapis.com/auth/drive.metadata.readonly']):
self.service = None
self.info = None # reserved for metadata
self.data = {} # store data from get requests
self.files = []
GoogleApi.__init__(self, apiname, apiversion, scopelist)
pass
def authenticate(self):
self.service = GoogleApi.authenticate(self)
def list_files(self,
query,
corpusdomain='user',
space='drive',
pagesize=100,
orderby='name',
pagetoken=None):
"""Lists or searches files
Params:
q: string, A query for filtering the file results. See the "Search for Files" guide for supported syntax.
corpus: string, The source of files to list.
Allowed values
domain - Files shared to the user's domain.
user - Files owned by or shared to the user.
spaces: string, A comma-separated list of spaces to query within the corpus. Supported values are 'drive', 'appDataFolder' and 'photos'.
pageSize: integer, The maximum number of files to return per page.
orderBy: string, A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder', 'modifiedByMeTime', 'modifiedTime', 'name', 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the 'desc' modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. Please note that there is a current limitation for users with approximately one million files in which the requested sort order is ignored.
pageToken: string, The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response.
Returns:
object
"""
files = []
log.info("Requesting files with {0}, {1}, {2}, {3}, {4}, {5}".format(
query, corpusdomain, space, pagesize, orderby, pagetoken))
while self.service:
response = self.service.files().list(
q=query,
corpus=corpusdomain,
spaces=space,
orderBy=orderby,
#fields='nextPageToken, files(id, name, mimeType)',
pageSize=pagesize,
pageToken=pagetoken
).execute()
if response['files']:
for file in response.get('files', []):
files.append(file)
"""
filename = file.get('name')
fileid = file.get('id')
mimetype = file.get('mimeType')
print('Found file: {0} ({1}) {2}'.format(filename, fileid, mimetype))
"""
pagetoken = response.get('nextPageToken', None)
if pagetoken is None:
log.info('File list received! Total files in list: {0}. Check the class instance attribute `driveapiobject.files` for file list.'.format(len(files)))
break
else:
log.info('No files found matching query: {0}'.format(query))
files = None
return files
def create(self,
_body=None,
mediabody=None,
keeprevforever=None,
usecontentasidxtxt=None,
ignoredefvis=None,
ocrlang=None):
"""
Params:
_body (object): The request body
mediabody (str): The filename of the media request body, or an instance of a MediaUpload object.
keeprevforever (bool): Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Drive.
usecontentasidxtxt (bool): Whether to use the uploaded content as indexable text.
ignoredefaultvis (bool): Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.
ocrlang (str): A language hint for OCR processing during image import (ISO 639-1 code).
Returns:
object
https://developers.google.com/resources/api-libraries/documentation/drive/v3/python/latest/drive_v3.files.html#create
"""
pass
def get(self,
fileid,
ackabuse=None):
"""Get a file's metadata or content by ID
Args:
fileid: string, The ID of the file. (required)
ackabuse: boolean, Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media.
Returns:
dict object
https://developers.google.com/resources/api-libraries/documentation/drive/v3/python/latest/drive_v3.files.html#get
"""
while self.service:
response = self.service.files().get(
fileId = fileid
).execute()
return (response)
def get_media(self,
fileid,
ackabuse=None):
"""Get a file's metadata or content by ID
Args:
fileid: string, The ID of the file. (required)
ackabuse: boolean, Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media.
Returns:
The media object as a string.
https://developers.google.com/resources/api-libraries/documentation/drive/v3/python/latest/drive_v3.files.html#get_media
"""
pass
def update(self,
fileid,
_body=None,
mediabody=None,
addparents=None,
removeparents=None,
usecontentasidxtxt=None,
ignoredefvis=None,
ocrlang=None):
"""Updates a file's metadata and/or content with patch semantics.
Params:
fileid (str): The ID of the file. (required)
_body (object): The request body
mediabody (str): The filename of the media request body, or an instance of a MediaUpload object.
addarents (str): A comma-separated list of parent IDs to add.
removeparents (str): A comma-separated list of parent IDs to remove.
usecontentasidxtxt (bool): Whether to use the uploaded content as indexable text.
ignoredefaultvis (bool): Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.
ocrlang (str): A language hint for OCR processing during image import (ISO 639-1 code).
Returns:
dict object
https://developers.google.com/resources/api-libraries/documentation/drive/v3/python/latest/drive_v3.files.html#update
"""
pass
def copy(self,
fileid,
_body,
keeprevforever=None,
ignoredefvis=None,
ocrlang=None):
"""Creates a copy of a file and applies any requested updates with patch semantics.
Params:
fileid (str): The ID of the file. (required)
_body (object): The request body. (required)
keeprevforever (bool): Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Drive.
ignoredefvis (bool): Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.
ocrlang (str): A language hint for OCR processing during image import (ISO 639-1 code).
Returns:
object
https://developers.google.com/resources/api-libraries/documentation/drive/v3/python/latest/drive_v3.files.html#copy
"""
pass
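# Hedged usage sketch (not part of the original module): the Drive query string
# is just an illustration of the "Search for Files" syntax referenced above;
# list_files() pages through results and returns the accumulated metadata dicts.
def _example_drive_usage():
    drive = DriveApi()
    drive.authenticate()
    spreadsheets = drive.list_files(
        query="mimeType='application/vnd.google-apps.spreadsheet'")
    return spreadsheets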
|
mit
|
johnmwalters/ThinkStats2
|
code/regression.py
|
62
|
9652
|
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import math
import pandas
import random
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
import re
import chap01soln
import first
import linear
import thinkplot
import thinkstats2
def QuickLeastSquares(xs, ys):
"""Estimates linear least squares fit and returns MSE.
xs: sequence of values
ys: sequence of values
returns: inter, slope, mse
"""
n = float(len(xs))
meanx = xs.mean()
dxs = xs - meanx
varx = np.dot(dxs, dxs) / n
meany = ys.mean()
dys = ys - meany
cov = np.dot(dxs, dys) / n
slope = cov / varx
inter = meany - slope * meanx
res = ys - (inter + slope * xs)
mse = np.dot(res, res) / n
return inter, slope, mse
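# Hedged illustration (not part of the original ThinkStats2 code): a tiny
# synthetic check of QuickLeastSquares on a known line y = 1 + 2x plus noise.
def QuickLeastSquaresExample():
    xs = np.arange(100, dtype=float)
    ys = 1.0 + 2.0 * xs + np.random.normal(0, 0.1, len(xs))
    inter, slope, mse = QuickLeastSquares(xs, ys)
    print('inter %.3f slope %.3f mse %.3f' % (inter, slope, mse))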
def ReadVariables():
"""Reads Stata dictionary files for NSFG data.
returns: DataFrame that maps variables names to descriptions
"""
vars1 = thinkstats2.ReadStataDct('2002FemPreg.dct').variables
vars2 = thinkstats2.ReadStataDct('2002FemResp.dct').variables
all_vars = vars1.append(vars2)
all_vars.index = all_vars.name
return all_vars
def JoinFemResp(df):
"""Reads the female respondent file and joins on caseid.
df: DataFrame
"""
resp = chap01soln.ReadFemResp()
resp.index = resp.caseid
join = df.join(resp, on='caseid', rsuffix='_r')
# convert from colon-separated time strings to datetimes
join.screentime = pandas.to_datetime(join.screentime)
return join
def GoMining(df):
"""Searches for variables that predict birth weight.
df: DataFrame of pregnancy records
returns: list of (rsquared, variable name) pairs
"""
variables = []
for name in df.columns:
try:
if df[name].var() < 1e-7:
continue
formula = 'totalwgt_lb ~ agepreg + ' + name
formula = formula.encode('ascii')
model = smf.ols(formula, data=df)
if model.nobs < len(df)/2:
continue
results = model.fit()
except (ValueError, TypeError):
continue
variables.append((results.rsquared, name))
return variables
def MiningReport(variables, n=30):
"""Prints variables with the highest R^2.
t: list of (R^2, variable name) pairs
n: number of pairs to print
"""
all_vars = ReadVariables()
variables.sort(reverse=True)
for mse, name in variables[:n]:
key = re.sub('_r$', '', name)
try:
desc = all_vars.loc[key].desc
if isinstance(desc, pandas.Series):
desc = desc[0]
print(name, mse, desc)
except KeyError:
print(name, mse)
def PredictBirthWeight(live):
"""Predicts birth weight of a baby at 30 weeks.
live: DataFrame of live births
"""
live = live[live.prglngth>30]
join = JoinFemResp(live)
t = GoMining(join)
MiningReport(t)
formula = ('totalwgt_lb ~ agepreg + C(race) + babysex==1 + '
'nbrnaliv>1 + paydu==1 + totincr')
results = smf.ols(formula, data=join).fit()
SummarizeResults(results)
def SummarizeResults(results):
"""Prints the most important parts of linear regression results:
results: RegressionResults object
"""
for name, param in results.params.iteritems():
pvalue = results.pvalues[name]
print('%s %0.3g (%.3g)' % (name, param, pvalue))
try:
print('R^2 %.4g' % results.rsquared)
ys = results.model.endog
print('Std(ys) %.4g' % ys.std())
print('Std(res) %.4g' % results.resid.std())
except AttributeError:
print('R^2 %.4g' % results.prsquared)
def RunSimpleRegression(live):
"""Runs a simple regression and compare results to thinkstats2 functions.
live: DataFrame of live births
"""
# run the regression with thinkstats2 functions
live_dropna = live.dropna(subset=['agepreg', 'totalwgt_lb'])
ages = live_dropna.agepreg
weights = live_dropna.totalwgt_lb
inter, slope = thinkstats2.LeastSquares(ages, weights)
res = thinkstats2.Residuals(ages, weights, inter, slope)
r2 = thinkstats2.CoefDetermination(weights, res)
# run the regression with statsmodels
formula = 'totalwgt_lb ~ agepreg'
model = smf.ols(formula, data=live)
results = model.fit()
SummarizeResults(results)
def AlmostEquals(x, y, tol=1e-6):
return abs(x-y) < tol
assert(AlmostEquals(results.params['Intercept'], inter))
assert(AlmostEquals(results.params['agepreg'], slope))
assert(AlmostEquals(results.rsquared, r2))
def PivotTables(live):
"""Prints a pivot table comparing first babies to others.
live: DataFrame of live births
"""
table = pandas.pivot_table(live, rows='isfirst',
values=['totalwgt_lb', 'agepreg'])
print(table)
def FormatRow(results, columns):
"""Converts regression results to a string.
results: RegressionResults object
returns: string
"""
t = []
for col in columns:
coef = results.params.get(col, np.nan)
pval = results.pvalues.get(col, np.nan)
if np.isnan(coef):
s = '--'
elif pval < 0.001:
s = '%0.3g (*)' % (coef)
else:
s = '%0.3g (%0.2g)' % (coef, pval)
t.append(s)
try:
t.append('%.2g' % results.rsquared)
except AttributeError:
t.append('%.2g' % results.prsquared)
return t
def RunModels(live):
"""Runs regressions that predict birth weight.
live: DataFrame of pregnancy records
"""
columns = ['isfirst[T.True]', 'agepreg', 'agepreg2']
header = ['isfirst', 'agepreg', 'agepreg2']
rows = []
formula = 'totalwgt_lb ~ isfirst'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
formula = 'totalwgt_lb ~ isfirst + agepreg'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
live['agepreg2'] = live.agepreg**2
formula = 'totalwgt_lb ~ isfirst + agepreg + agepreg2'
results = smf.ols(formula, data=live).fit()
rows.append(FormatRow(results, columns))
print(formula)
SummarizeResults(results)
PrintTabular(rows, header)
def PrintTabular(rows, header):
"""Prints results in LaTeX tabular format.
rows: list of rows
header: list of strings
"""
s = r'\hline ' + ' & '.join(header) + r' \\ \hline'
print(s)
for row in rows:
s = ' & '.join(row) + r' \\'
print(s)
print(r'\hline')
def LogisticRegressionExample():
"""Runs a simple example of logistic regression and prints results.
"""
y = np.array([0, 1, 0, 1])
x1 = np.array([0, 0, 0, 1])
x2 = np.array([0, 1, 1, 1])
beta = [-1.5, 2.8, 1.1]
log_o = beta[0] + beta[1] * x1 + beta[2] * x2
print(log_o)
o = np.exp(log_o)
print(o)
p = o / (o+1)
print(p)
like = y * p + (1-y) * (1-p)
print(like)
print(np.prod(like))
df = pandas.DataFrame(dict(y=y, x1=x1, x2=x2))
results = smf.logit('y ~ x1 + x2', data=df).fit()
print(results.summary())
def RunLogisticModels(live):
"""Runs regressions that predict sex.
live: DataFrame of pregnancy records
"""
#live = linear.ResampleRowsWeighted(live)
df = live[live.prglngth>30]
df['boy'] = (df.babysex==1).astype(int)
df['isyoung'] = (df.agepreg<20).astype(int)
    df['isold'] = (df.agepreg>35).astype(int)
df['season'] = (((df.datend+1) % 12) / 3).astype(int)
# run the simple model
model = smf.logit('boy ~ agepreg', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# run the complex model
model = smf.logit('boy ~ agepreg + hpagelb + birthord + C(race)', data=df)
results = model.fit()
print('nobs', results.nobs)
print(type(results))
SummarizeResults(results)
# make the scatter plot
exog = pandas.DataFrame(model.exog, columns=model.exog_names)
endog = pandas.DataFrame(model.endog, columns=[model.endog_names])
xs = exog['agepreg']
lo = results.fittedvalues
o = np.exp(lo)
p = o / (o+1)
#thinkplot.Scatter(xs, p, alpha=0.1)
#thinkplot.Show()
# compute accuracy
actual = endog['boy']
baseline = actual.mean()
predict = (results.predict() >= 0.5)
true_pos = predict * actual
true_neg = (1 - predict) * (1 - actual)
acc = (sum(true_pos) + sum(true_neg)) / len(actual)
print(acc, baseline)
columns = ['agepreg', 'hpagelb', 'birthord', 'race']
new = pandas.DataFrame([[35, 39, 3, 1]], columns=columns)
y = results.predict(new)
print(y)
def main(name, data_dir='.'):
thinkstats2.RandomSeed(17)
LogisticRegressionExample()
live, firsts, others = first.MakeFrames()
live['isfirst'] = (live.birthord == 1)
RunLogisticModels(live)
RunSimpleRegression(live)
RunModels(live)
PredictBirthWeight(live)
if __name__ == '__main__':
import sys
main(*sys.argv)
|
gpl-3.0
|
electronicvisions/deep-spike
|
lib/baseline.py
|
1
|
1652
|
from sklearn.datasets import load_digits
from sklearn import cross_validation
from sklearn import preprocessing
import tensorflow as tf
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def main():
digits = load_digits()
x_train, x_test, y_train_, y_test_ = cross_validation.train_test_split(digits.data, digits.target, test_size=0.2,
random_state=0)
lb = preprocessing.LabelBinarizer()
lb.fit(digits.target)
y_train = lb.transform(y_train_)
y_test = lb.transform(y_test_)
sess = tf.InteractiveSession()
x = tf.placeholder(tf.float32, shape=[None, 64])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
w_1 = weight_variable([64, 32])
b_1 = bias_variable([32])
h_1 = tf.nn.relu(tf.matmul(x, w_1) + b_1)
w_2 = weight_variable([32, 10])
b_2 = bias_variable([10])
y = tf.nn.softmax(tf.matmul(h_1, w_2) + b_2)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
sess.run(tf.initialize_all_variables())
for i in range(1000):
train_step.run(feed_dict={x: x_train, y_: y_train})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: x_test, y_: y_test}))
if __name__ == '__main__':
main()
|
gpl-2.0
|
mosbys/Clone
|
Cloning_v1/Cloning_v1.py
|
1
|
15738
|
import numpy as np
#import keras
import csv
import cv2
import os
import time
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Lambda
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD,Adam
from keras.preprocessing.image import ImageDataGenerator
import random
from keras import models, optimizers, backend
from keras.layers import core, convolutional, pooling
from random import randint
import tensorflow as tf
import json
iShowDebugPic =7
newShape=np.array(3)
def augment_brightness_camera_images(image):
image1 = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)
random_bright = .25+np.random.uniform()
#print(random_bright)
image1[:,:,2] = image1[:,:,2]*random_bright
image1 = cv2.cvtColor(image1,cv2.COLOR_HSV2RGB)
return image1
def preprocess(image, top_offset=.375, bottom_offset=.125):
"""
Applies preprocessing pipeline to an image: crops `top_offset` and `bottom_offset`
portions of image, resizes to half size
"""
top = int(top_offset * image.shape[0])
bottom = int(bottom_offset * image.shape[0])
image = image[top:-bottom, :]
newShape = image.shape
image= cv2.resize(image,(int(newShape[1]/2), int(newShape[0]/2)), interpolation = cv2.INTER_CUBIC)
return image
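# Hedged illustration (not part of the original script): shows the effect of
# preprocess() on a dummy frame with the simulator's 160x320x3 shape -- the crop
# keeps the middle band of the image and the result is half-sized.
def _preprocess_example():
    dummy = np.zeros((160, 320, 3), dtype=np.uint8)
    cropped = preprocess(dummy, top_offset=.375, bottom_offset=.125)
    # 160 - 60 - 20 = 80 rows remain, then halved -> (40, 160, 3)
    print(cropped.shape)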
def trans_image(image,steer,trans_range):
# Translation of image - move to right and left
tr_x = trans_range*np.random.uniform()-trans_range/2
steer_ang = steer + tr_x/trans_range*2*.2
tr_y = 10*np.random.uniform()-10/2
Trans_M = np.float32([[1,0,tr_x],[0,1,tr_y]])
image_tr = cv2.warpAffine(image,Trans_M,(image.shape[1],image.shape[0]))
return image_tr,steer_ang,tr_x
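# Hedged illustration (not part of the original script): the steering correction
# returned by trans_image() is proportional to the horizontal shift, reaching
# +/-0.2 at the edges of trans_range.
def _trans_image_example():
    dummy = np.zeros((40, 160, 3), dtype=np.uint8)
    shifted, steer, tr_x = trans_image(dummy, steer=0.0, trans_range=100)
    print('shift %.1f px -> steering offset %.3f' % (tr_x, steer))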
def generate_next_batch(batch_size=256):
"""
This generator yields the next training batch
:param batch_size:
Number of training images in a single batch
:return:
A tuple of features and steering angles as two numpy arrays
"""
while True:
#X_batch = []
#y_batch = []
#iIndex = randint(0,len(CenterIMGPath)-batch_size)
#X_batch = np.zeros([batch_size,ImgShape[0],ImgShape[1],ImgShape[2]])
X_batch = np.zeros([batch_size,newShape[0],newShape[1],ImgShape[2]])
#LeftImg = np.zeros([batch_size,ImgShape[0],ImgShape[1],ImgShape[2]])
#RightImg = np.zeros([batch_size,ImgShape[0],ImgShape[1],ImgShape[2]])
y_batch = np.zeros(batch_size)
#for i in range(iIndex,iIndex+batch_size):
i=0
while (i<batch_size):
iSelect = randint(0,2)
iIndex = randint(0,len(CenterIMGPath)-batch_size)
if (iSelect==0):
tmpImg = cv2.imread(CenterIMGPath[iIndex],1)
y_batch[i] = SWA_hist[iIndex]
#X_batch[i-iIndex] =cv2.imread(CenterIMGPath[i],1)
elif (iSelect==1):
                tmpImg = cv2.imread(LeftIMGPath[iIndex].strip(), 1)
y_batch[i] = SWA_hist[iIndex]+0.25
elif (iSelect==2):
                tmpImg = cv2.imread(RightIMGPath[iIndex].strip(), 1)
y_batch[i] = SWA_hist[iIndex]-0.25
if (iShowDebugPic==2):
plt.subplot(321)
plt.imshow(tmpImg)
#plt.show()
plt.subplot(322)
plt.imshow(cv2.resize(tmpImg,(2*64, 64), interpolation = cv2.INTER_CUBIC))
#plt.show()
plt.subplot(323)
plt.imshow(preprocess(tmpImg))
plt.subplot(324)
plt.imshow(cv2.resize(preprocess(tmpImg),(2*64, 64), interpolation = cv2.INTER_CUBIC))
plt.subplot(325)
tmpImg2 = tmpImg[ :, ::-1, :]
plt.imshow(tmpImg2)
plt.subplot(326)
plt.imshow(cv2.resize(preprocess(tmpImg2),(2*64, 64), interpolation = cv2.INTER_CUBIC))
plt.show()
iFlipImg = randint(0,4)
if (iFlipImg>1):
tmpImg = tmpImg[ :, ::-1, :]
y_batch[i] = -y_batch[i]
tmpImg = cv2.cvtColor(tmpImg,cv2.COLOR_BGR2RGB)
tmpImg=preprocess(tmpImg)
tmpImg = augment_brightness_camera_images(tmpImg)
if ((iFlipImg==0) or (iFlipImg==4)):
if (iSelect==10):
test3=(tmpImg)
if (iSelect==11):
tmpImg,y_batch[i],tr_x = trans_image(tmpImg,y_batch[i],50)
if (iSelect==12):
tmpImg,y_batch[i],tr_x = trans_image(tmpImg,y_batch[i],-50)
tmpImg,y_batch[i],tr_x = trans_image(tmpImg,y_batch[i],100)
X_batch[i] = tmpImg
#newShape = X_batch[i].shape
#X_batch[i] = cv2.resize(tmpImg,(newShape[1], newShape[0]), interpolation = cv2.INTER_CUBIC)
if ((abs(y_batch[i])<0.001) & (iFlipImg==1)):
i=i
else:
i=i+1
#y_batch[i-iIndex] = SWA_hist[i]
#X_batch = CenterImg
#y_batch = SWA_corrected
assert len(X_batch) == batch_size, 'len(X_batch) == batch_size should be True'
yield np.array(X_batch), np.array(y_batch)
def normalize_grayscale(image_data):
a = -0.5
b = 0.5
grayscale_min = 0
grayscale_max = 255
return a + ( ( (image_data - grayscale_min)*(b - a) )/( grayscale_max - grayscale_min ) )
def save_model(model, model_name='model.json', weights_name='model.h5'):
"""
Model save
"""
json_string = model.to_json()
print(json_string)
with open(model_name, 'w') as outfile:
json.dump(json_string, outfile)
model.save_weights(weights_name)
#sFilePathInput = r'C:\Users\Christoph\Documents\udacity\11_Cloning\simulator-windows-64\driving_log.csv'
sFilePathInput = r'driving_log.csv'
sPathReplace = 'IMG/'
#sPathReplace = 'C:\\Users\\Christoph\\Documents\\udacity\\11_Cloning\\simulator-windows-64\\IMG\\'
SWA_hist=[]
Speed_hist=[]
CenterIMGPath = []
LeftIMGPath = []
RightIMGPath = []
File = open(sFilePathInput,'r')
ImgShape =[]
csvInput = csv.reader(File, delimiter=',')
for row in csvInput:
#print(row)
#image = cv2.imread(row[0],0)
if (float(row[6])>20):
SWA_hist.append(float(row[3]))
CenterIMGPath.append(row[0].replace(sPathReplace,str(os.getcwd())+'/IMG/'))
LeftIMGPath.append(row[1].replace(sPathReplace,str(os.getcwd())+'/IMG/'))
RightIMGPath.append(row[2].replace(sPathReplace,str(os.getcwd())+'/IMG/'))
#cv2.imshow('image',image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
print('Testfile at {}'.format(CenterIMGPath[0]))
image_example = cv2.imread(CenterIMGPath[0],1)
ImgShape =image_example.shape
print('Testfile shape = {} x {} x {}'.format(ImgShape[0],ImgShape[1],ImgShape[2]))
image_example = (preprocess(image_example))
newShape = image_example.shape
print('Testfile will be resized to shape = {} x {} x {}'.format(newShape[0],newShape[1],newShape[2]))
if (iShowDebugPic==1):
plt.subplot(231)
plt.imshow(LeftImg[30])
plt.subplot(232)
plt.imshow(CenterImg[30])
plt.subplot(233)
plt.imshow(RightImg[30])
plt.show()
if (iShowDebugPic==3):
plt.hist(SWA_hist)
plt.savefig("histSWA.png")
#### NN
tf.python.control_flow_ops = tf
while (iShowDebugPic>6):
#X_batch = []
#y_batch = []
dumpFileName=''
batch_size=16
iIndex = randint(0,len(CenterIMGPath)-batch_size)
#X_batch = np.zeros([batch_size,ImgShape[0],ImgShape[1],ImgShape[2]])
X_batch = np.zeros([batch_size,2*64,64,ImgShape[2]])
#LeftImg = np.zeros([batch_size,ImgShape[0],ImgShape[1],ImgShape[2]])
#RightImg = np.zeros([batch_size,ImgShape[0],ImgShape[1],ImgShape[2]])
y_batch = np.zeros(batch_size)
for i in range(iIndex,iIndex+batch_size):
iSelect = randint(0,2)
iIndex = randint(0,len(CenterIMGPath)-batch_size)
if (iSelect==0):
tmpImg = cv2.imread(CenterIMGPath[i],1)
dumpFileName= CenterIMGPath[i]
#X_batch[i-iIndex] =cv2.imread(CenterIMGPath[i],1)
elif (iSelect==1):
tmpImg =cv2.imread(LeftIMGPath[i].strip(),1)
dumpFileName=LeftIMGPath[i]
#y_batch[i-iIndex] = SWA_hist[i]-0.2
elif (iSelect==2):
tmpImg =cv2.imread(RightIMGPath[i].strip(),1)
dumpFileName=RightIMGPath[i]
#y_batch[i-iIndex] = SWA_hist[i]+0.2
tmpSWA = SWA_hist[i]
tmpImg = cv2.cvtColor(tmpImg,cv2.COLOR_BGR2RGB)
if (iShowDebugPic==6):
sp=plt.subplot(421)
plt.imshow(tmpImg)
sp.set_title('Axis [1,1]')
#plt.show()
plt.subplot(422)
plt.imshow(cv2.resize(tmpImg,(2*64, 64), interpolation = cv2.INTER_CUBIC))
#plt.show()
plt.subplot(423)
plt.imshow(preprocess(tmpImg))
plt.subplot(424)
plt.imshow(cv2.resize(preprocess(tmpImg),(2*64, 64), interpolation = cv2.INTER_CUBIC))
plt.subplot(425)
tmpImg2 = tmpImg[ :, ::-1, :]
plt.imshow(tmpImg2)
plt.subplot(426)
plt.imshow(cv2.resize(preprocess(tmpImg2),(2*64, 64), interpolation = cv2.INTER_CUBIC))
v_delta = .05
tmpImg3 = preprocess(
tmpImg,
top_offset=random.uniform(.375 - v_delta, .375 + v_delta),
bottom_offset=random.uniform(.125 - v_delta, .125 + v_delta)
)
plt.subplot(427)
plt.imshow(preprocess(tmpImg3))
plt.subplot(428)
plt.imshow(cv2.resize(preprocess(tmpImg3),(2*64, 64), interpolation = cv2.INTER_CUBIC))
plt.show()
tmpImg4 = trans_image(tmpImg,tmpSWA,80)
plt.subplot(311)
plt.imshow(tmpImg)
#plt.show()
plt.subplot(312)
plt.imshow(tmpImg4[0][0:160][:][:])
#plt.show()
plt.subplot(313)
plt.imshow(preprocess(tmpImg4[0][0:160][:][:]))
plt.show()
if (iShowDebugPic==7):
sPathReplace = 'C:\\Users\\Christoph\\Documents\\udacity\\11_Cloning\\simulator-windows-64\\IMG\\'
sp1=plt.subplot(321)
#plt.title('Orginal Img'+dumpFileName)
plt.imshow(tmpImg)
plt.imsave('00_OrginalImg.jpg',tmpImg)
sp1.set_title('Orginal Img'+ dumpFileName.replace(sPathReplace,''))
#plt.show()
sp2=plt.subplot(322)
plt.imshow(preprocess(tmpImg))
plt.imsave('01_CropedImg.jpg',preprocess(tmpImg))
sp2.set_title('Croped Img - SWA orginal = '+str(tmpSWA))
#plt.show()
sp3=plt.subplot(323)
if (iSelect==1):
tmpSWA = tmpSWA +0.2
elif (iSelect==2):
tmpSWA = tmpSWA -0.2
test = (preprocess(tmpImg))
newShape = test.shape
#plt.imshow(cv2.resize(preprocess(tmpImg),(newShape[1], newShape[0]), interpolation = cv2.INTER_CUBIC))
plt.imshow(test)
plt.imsave('02_ReizedWithSWAoffset.jpg',test)
sp3.set_title('Resized 40 x 160 - SWA with offset = '+str(tmpSWA) )
sp4=plt.subplot(324)
test = test[ :, ::-1, :]
#plt.imshow(cv2.resize(preprocess(tmpImg2),(newShape[1], newShape[0]), interpolation = cv2.INTER_CUBIC))
plt.imsave('03_FlippedImg.jpg',test)
plt.imshow(test)
sp4.set_title('Flipped - SWA with offset and flipped= '+str(-tmpSWA) )
sp5=plt.subplot(325)
if (iSelect==0):
test3,y_steer,tr_x = trans_image(preprocess(tmpImg),tmpSWA,1)
if (iSelect==1):
test3,y_steer,tr_x = trans_image(preprocess(tmpImg),tmpSWA,50)
if (iSelect==2):
test3,y_steer,tr_x = trans_image(preprocess(tmpImg),tmpSWA,-50)
plt.imshow(test3)
sp5.set_title('Shiftet Img - SWA transformed = '+str(y_steer))
plt.imsave('04_ShiftetImg.jpg',test3)
sp6=plt.subplot(326)
test4 = augment_brightness_camera_images(preprocess(tmpImg))
plt.imshow(test4)
plt.imsave('05_BrightnessImg.jpg',test4)
sp6.set_title('Changed brightness - final img')
plt.show()
number_of_epochs = 8
number_of_samples_per_epoch = 20032
number_of_validation_samples = 6400
learning_rate = 1e-4
activation_relu = 'relu'
# Source: https://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf
model = Sequential()
#model.add(Lambda(lambda x: x/255.-0.5,input_shape=(newShape[0], newShape[1], 3)))
model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=(newShape[0], newShape[1], 3)))
model.add(Convolution2D(24, 5, 5, border_mode='same', input_shape=(newShape[0], newShape[1], 3)))
model.add(Activation(activation_relu))
model.add(Dropout(0.5))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(36, 5, 5, border_mode='same', subsample=(2, 2)))
model.add(Activation(activation_relu))
model.add(Dropout(0.5))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(48, 5, 5, border_mode='same', subsample=(2, 2)))
model.add(Activation(activation_relu))
model.add(Dropout(0.5))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1)))
model.add(Activation(activation_relu))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(1, 1)))
model.add(Activation(activation_relu))
model.add(Dropout(0.5))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(Flatten())
# Next, five fully connected layers
model.add(Dense(1164))
#model.add(Dropout(0.5))
model.add(Activation(activation_relu))
model.add(Dense(100))
#model.add(Dropout(0.5))
model.add(Activation(activation_relu))
model.add(Dense(50))
#model.add(Dropout(0.5))
model.add(Activation(activation_relu))
model.add(Dense(10))
#model.add(Dropout(0.5))
model.add(Activation(activation_relu))
model.add(Dense(1))
model.compile(optimizer=Adam(learning_rate), loss="mse", )
model.summary()
t1=time.time()
# fits the model on batches with real-time data augmentation:
SWA_overall = np.zeros(len(CenterIMGPath)*5)
model.fit_generator(generate_next_batch(),samples_per_epoch=len(CenterIMGPath)*10, nb_epoch=5)
t2=time.time()
print('Time: {}s'.format(t2-t1))
save_model(model)
|
gpl-2.0
|
saltastro/saltefficiency
|
nightly/create_night_table.py
|
1
|
11144
|
import string
import datetime
import numpy as np
import pylab as pl
from matplotlib.patches import Rectangle
import blockvisitstats as bvs
import sdb_utils as su
def create_night_table(obsdate, sdb, els):
"""Create a table that shows a break down for the night and what happened in each block
Parameters
----------
nid: int
NightInfo_Id
sdb: ~mysql.mysql
A connection to the sdb database
els: ~mysql.mysql
A connection to the els database
"""
# create a dictionary to break down the events of the night
night_dict = {}
nid = su.getnightinfo(sdb, obsdate)
#get the times for the night
record=sdb.select('EveningTwilightEnd, MorningTwilightStart', 'NightInfo', 'NightInfo_Id=%i' % nid)
stime=record[0][0]
etime=record[0][1]
totaltime=(etime-stime).seconds
night = Night(nid, stime, etime)
#get the SO event log
record=sdb.select('EventType_Id, EventTime', 'SoLogEvent', 'NightInfo_Id=%i' % nid)
#set it up wtih the correct time
event_list=[]
for i in range(len(record)):
t=record[i][1].seconds
#convert to times from the observing day
if t < 12*3600:
t+=12*3600
else:
t-=12*3600
event_list.append([record[i][0],t])
# add weather down time to night_dict
time_list,wea_arr = create_weather(els, stime, etime)
night.add_weather(time_list, wea_arr)
# add the accepted blocks to night_dict
block_list=bvs.blockvisitstats(sdb, obsdate, update=False)
for b in block_list:
print b
night_dict[b[1]] = ['Science', b]
night.add_blocks(block_list)
# add fault down time to the night_dict
faults = create_faults(sdb, nid)
problem_list=[]
for f in faults:
t1 = (f[1]-night.day_start).seconds
t2 = (f[2]-night.day_start).seconds
problem_list.append([t1, t2])
print f
night_dict[f[1]] = ['Fault', f]
night.add_problems(problem_list)
# add mirror alignment to the night_dict
mirror_alignment=create_mirror_alignment(event_list)
night.add_mirroralignment(mirror_alignment)
for m in mirror_alignment:
t1 = convert_decimal_hours(obsdate, m[0]/3600.0)
night_dict[t1] = ['Mirror', m]
# use the dict to populate the table to display what did happen
night.calc_engineering()
night.calc_weather()
#night.plot()
info_txt="""
Total Time: {5:0.2f} <br>
Science Time: {0:0.2f} <br>
Engineering TIme: {4: 0.2f} <br>
Weather Time: {1:0.2f} <br>
Time Lost to Problems: {3:0.2f} <br>\n
<br>
Mirror Alignment Time: {2:0.2f} <br>\n
""".format(night.sciencetime, night.weathertime, night.mirroralignmenttime, night.problemtime, night.engineertime, night.totaltime/3600.0)
table_txt ='<p><table>'
table_txt +='<tr><th>Time</th><th>Type</th><th>Length</th><th>Comment</th></tr>\n'
status = 0
start = None
for i in range(len(night.status_arr)):
if start is not None and night.status_arr[i]!=status:
table_txt += create_row(sdb, start, i, night_dict, night, obsdate)
start=None
if night.status_arr[i]>0 and start is None:
status = night.status_arr[i]
start = i
table_txt +='</table></p>\n'
return info_txt + table_txt
def create_row(sdb, sid, eid, night_dict, night, obsdate):
"""Create a row with all the information for that block
"""
status = night.status_arr[sid]
t1 = convert_decimal_hours(obsdate, night.time_arr[sid])
t2 = convert_decimal_hours(obsdate, night.time_arr[eid])
l=(t2-t1).seconds/3600.0
length='{0}:{1}'.format(int(l), string.zfill(int(60.0*(l-int(l))),2))
#find the key
min_time = 600
block_time = None
t0 = convert_decimal_hours(obsdate, 1.0)
for k in night_dict.keys():
t = (t1 - t0).seconds - (k-t0).seconds
if abs(t) < min_time:
block_time = k
min_time = abs(t)
keys = night_dict.keys()
#create the row
fgcolor='#000000'
if status==1: fgcolor='#FFFFFF'
row_str='<tr height={5}><td>{0}<br>{1}</td><td bgcolor="{2}"><font color="{4}">{3}</font></td>'.format(t1, t2, night.color_list[status], night.statusname_list[status], fgcolor, 50*l)
row_str+='<td>{0}</td>'.format(length)
if status==1 and block_time is not None:
print block_time
b = night_dict[block_time][1]
print b
row_str+='<td>{0}</td>'.format(b[4])
row_str+='</tr>\n'
return row_str
def convert_decimal_hours(obsdate, t1):
"""Convert decimal hours to a date time"""
if t1 < 12:
t1 = '{} {}:{}'.format(obsdate, int(t1), int(60*(t1-int(t1))))
t1 = datetime.datetime.strptime(t1, '%Y%m%d %H:%M')
t1 = t1+datetime.timedelta(seconds=12*3600)
else:
t1 = '{} {}:{}'.format(obsdate, int(t1-12), int(60*(t1-int(t1))))
t1 = datetime.datetime.strptime(t1, '%Y%m%d %H:%M')
t1 = t1+datetime.timedelta(seconds=24*3600)
return t1
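# Hedged illustration (not part of the original module): decimal hours are
# counted from noon of the observing day, so 1.5 maps to 13:30 on obsdate and
# 13.5 wraps past midnight into the following calendar day.
def _convert_decimal_hours_example(obsdate='20150101'):
    evening = convert_decimal_hours(obsdate, 1.5)    # 2015-01-01 13:30:00
    morning = convert_decimal_hours(obsdate, 13.5)   # 2015-01-02 01:30:00
    return evening, morning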
def create_faults(sdb, nid):
"""Return a list of information about the faults
"""
select = 'Fault_id, FaultStart, FaultEnd, TimeLost, SaltSubsystem'
tables = 'Fault join SaltSubsystem using (SaltSubsystem_Id)'
logic = 'NightInfo_Id={} and TimeLost > 0'.format(nid)
return sdb.select(select, tables, logic)
def create_weather(els, stime, etime):
"""Return an array of times that the weather was bad
"""
weather_info = su.get_weather_info(els, stime, etime)
time_list, air_arr, dew_arr, hum_arr, w30_arr, w30d_arr, w10_arr, \
w10d_arr, rain_list, t02_arr, t05_arr, t10_arr, t15_arr, t20_arr, \
t25_arr, t30_arr = weather_info
#need to include other/better limits
wea_arr = (hum_arr>85.0)
return time_list, wea_arr
def create_mirror_alignment(event_list):
"""Determine the mirror alignment time
"""
mirror_list=[]
mirror_start=False
for r in event_list:
if r[0]==10 and mirror_start==False:
t=r[1]
#use the time from the
if old_event[0] in [4, 6, 13, 14]: t=old_event[1]
mirror_start=[t]
if mirror_start:
if r[0] in [3,5]:
t=r[1]
mirror_start.append(t)
mirror_list.append(mirror_start)
mirror_start=False
old_event = r
return mirror_list
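# Hedged illustration (not part of the original module): SO log event id 10
# opens a mirror alignment (backdated to the preceding event when that event is
# one of [4, 6, 13, 14]) and ids 3 or 5 close it, so the log below yields a
# single [start_seconds, end_seconds] pair.
def _mirror_alignment_example():
    events = [[4, 1000], [10, 1200], [3, 2400]]
    return create_mirror_alignment(events)   # [[1000, 2400]]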
class Night:
def __init__(self, nid, night_start, night_end):
self.nid=nid
self.night_start = night_start
self.night_end = night_end
self.totaltime=(self.night_end-self.night_start).seconds
self.sciencetime=0
self.engineertime=0
self.weathertime=0
self.problemtime=0
self.shuttertime=0
self.mirroralignmenttime=0
self.dt=0.1
#set up arrays to represent different events
self.time_arr = np.arange(0,24,self.dt) #array representing time since noon that day
self.day_start = datetime.datetime(self.night_start.year, self.night_start.month, self.night_start.day, 12,0,0)
self.night_time = (self.time_arr > (self.night_start-self.day_start).seconds/3600.0) * ( self.time_arr < (self.night_end-self.day_start).seconds/3600.0)
self.status_arr = np.zeros(len(self.time_arr), dtype=int)
self.stime = (self.night_start-self.day_start).seconds/3600.0
self.etime = (self.night_end-self.day_start).seconds/3600.0
#set color and name list
self.statusname_list=['none', 'Science', 'Engineer', 'Weather', 'Problem', 'Rejected']
self.color_list=['none', 'blue', 'green', 'purple', 'red','yellow'] #none, science, engineer, weather, problem, rejected
def add_weather(self, time_list, wea_arr):
"""Add the weather to the status array and weather
the telescope is closed for weather or not
time_list is in seconds since the start of the night
"""
nstart = (self.night_start-self.day_start).seconds
for i in range(len(time_list)):
if wea_arr[i]:
t = int((nstart + time_list[i])/3600.0/self.dt)
self.status_arr[t]=3
return
def add_mirroralignment(self, mirror_list):
"""Add the mirror alignment to the status array
"""
for t1,t2 in mirror_list:
t1=t1/3600.0
t2=t2/3600.0
if t1 > self.stime and t1 < self.etime:
self.mirroralignmenttime += t2-t1
print t1, t2, self.stime, self.etime, self.mirroralignmenttime
mask = (self.time_arr>t1)*(self.time_arr<t2)
mid = np.where(mask)[0]
self.status_arr[mid] = 2
def add_problems(self, problems_list):
"""Add the problems to the status array
"""
for t1,t2 in problems_list:
t1=t1/3600.0
t2=t2/3600.0
if t1 < self.etime and t2 > self.stime:
et2 = min(t2, self.etime)
self.problemtime += et2-t1
mask = (self.time_arr>t1)*(self.time_arr<t2)
mid = np.where(mask)[0]
self.status_arr[mid] = 4
def add_blocks(self, block_list):
"""Add science time blocks to the status array
"""
for bvid, t1,t2,stat, propcode in block_list:
t1 = (t1- self.day_start).seconds/3600.0
t2 = (t2- self.day_start).seconds/3600.0
et1 = max(self.stime, t1)
if t1 < self.etime:
et2 = min(t2, self.etime)
mask = (self.time_arr>t1)*(self.time_arr<t2)
mid = np.where(mask)[0]
if stat==0:
self.sciencetime += et2-et1
self.status_arr[mid] = 1
else:
self.status_arr[mid] = 5
if stat==3: self.weathertime += et2-et1
if stat<3:
pass #self.problemtime += et2-et1
#print 'reject', self.problemtime, et2,et1
def calc_engineering(self):
for i in range(len(self.time_arr)):
if self.time_arr[i]>self.stime and self.time_arr[i]<self.etime:
if self.status_arr[i]==2 or self.status_arr[i]==0:
self.engineertime += self.dt
def calc_weather(self):
for i in range(len(self.time_arr)):
if self.time_arr[i]>self.stime and self.time_arr[i]<self.etime:
if self.status_arr[i]==3:
self.weathertime += self.dt
def plot(self):
color_list=['none', 'blue', 'green', 'purple', 'red','yellow'] #none, science, engineer, weather, problem, rejected
pl.figure()
ax=pl.axes([0.1,0.1,0.8,0.8])
#add nightiime patch
ax.add_patch(Rectangle((self.stime,0),self.totaltime/3600.0,4, alpha=0.3))
#add status patches
for i in range(len(self.status_arr)):
if self.status_arr[i]>0:
color=color_list[self.status_arr[i]]
ax.add_patch(Rectangle((self.time_arr[i],1),self.dt,0.5, alpha=1.0, facecolor=color, edgecolor=color)) #color_list[self.status_arr[i]]))
ax.axis([7,17,1,1.5])
pl.show()
|
bsd-3-clause
|
StartupsPoleEmploi/labonneboite
|
labonneboite/common/load_data.py
|
1
|
8478
|
import os
import pickle
import csv
import pandas as pd
import math
from functools import lru_cache, reduce
from collections import defaultdict
USE_ROME_SLICING_DATASET = False # Rome slicing dataset is not ready yet
if USE_ROME_SLICING_DATASET:
OGR_ROME_FILE = "rome_slicing_dataset/ogr_rome_mapping.csv"
ROME_FILE = "rome_slicing_dataset/rome_labels.csv"
ROME_NAF_FILE = "rome_slicing_dataset/rome_naf_mapping.csv"
else:
OGR_ROME_FILE = "ogr_rome_mapping.csv"
ROME_FILE = "rome_labels.csv"
ROME_NAF_FILE = "rome_naf_mapping.csv"
def load_file(func, filename):
full_filename = os.path.join(os.path.dirname(
os.path.realpath(__file__)), "data/%s" % filename)
return func(full_filename)
def load_pickle_file(filename):
def f(full_filename):
return pickle.load(open(full_filename, "r"))
return load_file(f, filename)
def load_pd_dataframe(filename, delimiter=',', dtype=None):
    def f(full_filename):
        return pd.read_csv(full_filename, sep=delimiter, dtype=dtype)
return load_file(f, filename)
def load_csv_file(filename, delimiter='|'):
def f(full_filename):
csv_file = open(full_filename, 'r')
reader = csv.reader(csv_file, delimiter=delimiter)
return reader
reader = load_file(f, filename)
rows = []
len_previous_row = None
for row in reader:
if len_previous_row:
# at least second line of CSV file
if len(row) == 0:
# skip empty rows
continue
elif len(row) != len_previous_row:
raise IndexError(
"found row with abnormal number of fields : %s" % row)
rows.append(row)
else:
# first line of CSV file: headers should be ignored
pass
len_previous_row = len(row)
return rows
def load_rows_as_set(rows):
for row in rows:
if len(row) != 1:
raise IndexError("wrong number of fields")
return set([row[0] for row in rows])
def load_rows_as_dict(rows):
d = {}
for row in rows:
if len(row) != 2:
raise IndexError("wrong number of fields")
if row[0] in d:
raise ValueError("duplicate key")
d[row[0]] = row[1]
return d
def load_rows_as_dict_of_dict(rows):
d = {}
for row in rows:
if len(row) != 3:
raise IndexError("wrong number of fields")
# values of 3 fields
f1 = row[0]
f2 = row[1]
f3 = row[2]
if f1 in d:
if f2 in d[f1]:
raise ValueError("duplicate key")
else:
d[f1][f2] = f3
else:
d[f1] = {f2: f3}
return d
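# Hedged illustration (not part of the original module): with rows of
# (naf_prefix, rome_code, contact_mode) this builds a nested dict
# {naf_prefix: {rome_code: contact_mode}}, which is how load_contact_modes()
# below consumes it.
def _load_rows_as_dict_of_dict_example():
    rows = [['4711', 'D1507', 'mail'], ['4711', 'D1106', 'phone']]
    return load_rows_as_dict_of_dict(rows)
    # -> {'4711': {'D1507': 'mail', 'D1106': 'phone'}}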
@lru_cache(maxsize=None)
def load_related_rome_areas():
"""
Build a dict with department code (code insee) as keys and area code as values (bassins d'emploi).
Used for PSE study in 2021.
"""
rows = load_csv_file("lbb-pse_bassin-emploi_code-insee.csv", delimiter=',')
return reduce(reduceRelateRomesAreas, rows, {})
def reduceRelateRomesAreas(aggr, row):
[code_insee, code_area] = row
aggr[code_insee] = code_area
return aggr
@lru_cache(maxsize=None)
def load_related_rome():
"""
Build a dict with area code (bassin d'emploi) as keys.
The values are dict with rome code as keys and a list of related rome codes as values.
Each related rome is a dict with `rome` and `score` properties.
Used for PSE study.
"""
rows = load_csv_file("lbb-pse_bassin-emploi_rome-connexe.csv", delimiter=',')
return reduce(reduceRelateRomes, rows, {})
def reduceRelateRomes(aggr, row):
[code_area, rome, rome_connexe, score] = row
entry_code_area = aggr.get(code_area, {})
entry_rome = entry_code_area.get(rome, [])
entry_rome.append({'rome': rome_connexe, 'score': float(score)})
entry_code_area[rome] = entry_rome
aggr[code_area] = entry_code_area
return aggr
@lru_cache(maxsize=None)
def load_city_codes():
rows = load_csv_file("city_codes.csv")
commune_id_to_commune_name = load_rows_as_dict(rows)
return commune_id_to_commune_name
@lru_cache(maxsize=None)
def load_contact_modes():
"""
Use comma delimiter instead of pipe so that it is recognized by github
and can easily be edited online by the intrapreneurs.
"""
rows = load_csv_file("contact_modes.csv", delimiter=',')
naf_prefix_to_rome_to_contact_mode = load_rows_as_dict_of_dict(rows)
return naf_prefix_to_rome_to_contact_mode
@lru_cache(maxsize=None)
def load_ogr_labels():
rows = load_csv_file("ogr_labels.csv")
ogr_to_label = load_rows_as_dict(rows)
return ogr_to_label
@lru_cache(maxsize=None)
def load_groupements_employeurs():
rows = load_csv_file("groupements_employeurs.csv")
sirets = load_rows_as_set(rows)
return sirets
@lru_cache(maxsize=None)
def load_ogr_rome_mapping():
rows = load_csv_file(OGR_ROME_FILE)
OGR_COLUMN = 0
ROME_COLUMN = 1
ogr_to_rome = {}
for row in rows:
ogr = row[OGR_COLUMN]
if ogr not in load_ogr_labels():
raise ValueError("missing label for OGR %s" % ogr)
rome = row[ROME_COLUMN]
if rome not in load_rome_labels():
raise ValueError("missing label for ROME %s" % rome)
ogr_to_rome[ogr] = rome
return ogr_to_rome
@lru_cache(maxsize=None)
def load_rome_labels():
rows = load_csv_file(ROME_FILE)
rome_to_label = load_rows_as_dict(rows)
return rome_to_label
@lru_cache(maxsize=None)
def load_naf_labels():
rows = load_csv_file("naf_labels.csv")
naf_to_label = load_rows_as_dict(rows)
return naf_to_label
@lru_cache(maxsize=None)
def load_rome_naf_mapping():
return load_csv_file(ROME_NAF_FILE, delimiter=',')
@lru_cache(maxsize=None)
def load_metiers_tension():
csv_metiers_tension = load_csv_file("metiers_tension.csv", ',')
rome_to_tension = defaultdict(int)
for row in csv_metiers_tension:
tension_pct = row[2]
rome_code = row[3]
# FIXME : remove rows where tension is #N/A in the dataset, to remove this ugly check ?
if tension_pct != '#N/A':
tension_pct = float(tension_pct)
if 0 <= tension_pct <= 100:
# As a single ROME can have multiple tensions,
# It has been decided to take the higher tension for a rome
rome_to_tension[rome_code] = max(rome_to_tension[rome_code], tension_pct)
else:
raise ValueError
return rome_to_tension
# Used for the PSE study: returns a list of SIRETs that must not be seen on LBB
@lru_cache(maxsize=None)
def load_siret_to_remove():
rows = load_csv_file("untreated_BB.csv", ',')
sirets_to_remove = load_rows_as_set(rows)
return sirets_to_remove
#Used by importer job to extract etablissement
@lru_cache(maxsize=None)
def load_effectif_labels():
'''
    The dataframe to load looks like this.
code label
0 0 0-0
1 1 1-2
2 2 3-5
3 3 6-9
4 11 10-19
5 12 20-49
6 21 50-99
7 22 100-199
8 31 200-249
9 32 250-499
10 41 500-999
11 42 1000-1999
12 51 2000-4999
13 52 5000-9999
14 53 10000+
'''
def create_column(row, which='start_effectif'):
'''
        From the label, we create start and end columns that delimit the interval.
        They let us determine, from the number of employees in an office, which category the office belongs to.
'''
        # Split the label, which has the form "10-19" or "10000+"
splitted_label = row['label'].split('-')
if len(splitted_label) == 1: #10000+
value = math.inf if which == 'end_effectif' else 10000
else:
if which == 'start_effectif':
value = int(splitted_label[0])
else:
value = int(splitted_label[1])
return value
df = load_pd_dataframe("helpers/effectif_labels.csv", ',', dtype={'code':str})
df['start_effectif'] = df.apply(lambda row: create_column(row,'start_effectif'), axis=1)
df['end_effectif'] = df.apply(lambda row: create_column(row,'end_effectif'), axis=1)
return df
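# A minimal usage sketch (an assumption, not part of the original module): with
# the start_effectif/end_effectif columns computed above, the size bucket for an
# office with, say, 42 employees could be looked up as
#
#   df = load_effectif_labels()
#   row = df[(df.start_effectif <= 42) & (42 <= df.end_effectif)].iloc[0]
#   print(row.code, row.label)  # expected: 12 20-49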
OGR_ROME_CODES = load_ogr_rome_mapping()
ROME_CODES = list(OGR_ROME_CODES.values())
|
agpl-3.0
|
toastedcornflakes/scikit-learn
|
examples/applications/plot_stock_market.py
|
76
|
8522
|
"""
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters and can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve a 2D
embedding.
Visualization
-------------
The outputs of the 3 models are combined in a 2D graph where nodes
represent the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
try:
from matplotlib.finance import quotes_historical_yahoo_ochl
except ImportError:
# quotes_historical_yahoo_ochl was named quotes_historical_yahoo before matplotlib 1.4
from matplotlib.finance import quotes_historical_yahoo as quotes_historical_yahoo_ochl
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a reasonably calm time period (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [quotes_historical_yahoo_ochl(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
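# (This rescales the precision matrix by 1 / sqrt(diagonal) on both sides,
# which, up to a sign flip, is the usual definition of partial correlations.)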
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
|
bsd-3-clause
|
kashif/scikit-learn
|
examples/tree/plot_tree_regression_multioutput.py
|
22
|
1848
|
"""
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision trees.
The :ref:`decision trees <tree>`
are used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
|
bsd-3-clause
|
plowman/python-mcparseface
|
models/syntaxnet/tensorflow/tensorflow/contrib/learn/python/learn/io/pandas_io.py
|
7
|
2102
|
"""Methods to allow pandas.DataFrame."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {'int8': 'int', 'int16': 'int', 'int32': 'int', 'int64': 'int',\
'uint8': 'int', 'uint16': 'int', 'uint32': 'int', 'uint64': 'int', 'float16': 'float',\
'float32': 'float', 'float64': 'float', 'bool': 'i'}
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors"""
if not isinstance(data, pd.DataFrame):
return data
if all(dtype.name in PANDAS_DTYPES for dtype in data.dtypes):
return data.values.astype('float')
else:
raise ValueError('Data types for data must be int, float, or bool.')
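# A minimal usage sketch with a hypothetical frame, for illustration only:
#
#   df = pd.DataFrame({'a': [1, 2], 'b': [0.5, 1.5]})
#   extract_pandas_data(df)        # -> array([[1. , 0.5], [2. , 1.5]])
#   extract_pandas_data([[1, 2]])  # non-DataFrame input is returned unchanged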
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame."""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
if all(dtype.name in PANDAS_DTYPES for dtype in labels.dtypes):
return labels.values.astype('float')
else:
raise ValueError('Data types for labels must be int, float, or bool.')
else:
return labels
|
apache-2.0
|
emon10005/scikit-image
|
skimage/transform/tests/test_radon_transform.py
|
16
|
14464
|
from __future__ import print_function, division
import numpy as np
from numpy.testing import assert_raises
import itertools
import os.path
from skimage.transform import radon, iradon, iradon_sart, rescale
from skimage.io import imread
from skimage import data_dir
from skimage._shared.testing import test_parallel
PHANTOM = imread(os.path.join(data_dir, "phantom.png"),
as_grey=True)[::2, ::2]
PHANTOM = rescale(PHANTOM, 0.5, order=1)
def _debug_plot(original, result, sinogram=None):
from matplotlib import pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
if sinogram is None:
plt.figure(figsize=(15, 6))
sp = 130
else:
plt.figure(figsize=(11, 11))
sp = 221
plt.subplot(sp + 0)
plt.imshow(sinogram, aspect='auto', **imkwargs)
plt.subplot(sp + 1)
plt.imshow(original, **imkwargs)
plt.subplot(sp + 2)
plt.imshow(result, vmin=original.min(), vmax=original.max(), **imkwargs)
plt.subplot(sp + 3)
plt.imshow(result - original, **imkwargs)
plt.colorbar()
plt.show()
def _rescale_intensity(x):
x = x.astype(float)
x -= x.min()
x /= x.max()
return x
def check_radon_center(shape, circle):
# Create a test image with only a single non-zero pixel at the origin
image = np.zeros(shape, dtype=np.float)
image[(shape[0] // 2, shape[1] // 2)] = 1.
# Calculate the sinogram
theta = np.linspace(0., 180., max(shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=circle)
# The sinogram should be a straight, horizontal line
sinogram_max = np.argmax(sinogram, axis=0)
print(sinogram_max)
assert np.std(sinogram_max) < 1e-6
def test_radon_center():
shapes = [(16, 16), (17, 17)]
circles = [False, True]
for shape, circle in itertools.product(shapes, circles):
yield check_radon_center, shape, circle
rectangular_shapes = [(32, 16), (33, 17)]
for shape in rectangular_shapes:
yield check_radon_center, shape, False
def check_iradon_center(size, theta, circle):
debug = False
# Create a test sinogram corresponding to a single projection
# with a single non-zero pixel at the rotation center
if circle:
sinogram = np.zeros((size, 1), dtype=np.float)
sinogram[size // 2, 0] = 1.
else:
diagonal = int(np.ceil(np.sqrt(2) * size))
sinogram = np.zeros((diagonal, 1), dtype=np.float)
sinogram[sinogram.shape[0] // 2, 0] = 1.
maxpoint = np.unravel_index(np.argmax(sinogram), sinogram.shape)
print('shape of generated sinogram', sinogram.shape)
print('maximum in generated sinogram', maxpoint)
# Compare reconstructions for theta=angle and theta=angle + 180;
# these should be exactly equal
reconstruction = iradon(sinogram, theta=[theta], circle=circle)
reconstruction_opposite = iradon(sinogram, theta=[theta + 180],
circle=circle)
print('rms deviance:',
np.sqrt(np.mean((reconstruction_opposite - reconstruction)**2)))
if debug:
import matplotlib.pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
plt.figure()
plt.subplot(221)
plt.imshow(sinogram, **imkwargs)
plt.subplot(222)
plt.imshow(reconstruction_opposite - reconstruction, **imkwargs)
plt.subplot(223)
plt.imshow(reconstruction, **imkwargs)
plt.subplot(224)
plt.imshow(reconstruction_opposite, **imkwargs)
plt.show()
assert np.allclose(reconstruction, reconstruction_opposite)
def test_iradon_center():
sizes = [16, 17]
thetas = [0, 90]
circles = [False, True]
for size, theta, circle in itertools.product(sizes, thetas, circles):
yield check_iradon_center, size, theta, circle
def check_radon_iradon(interpolation_type, filter_type):
debug = False
image = PHANTOM
reconstructed = iradon(radon(image), filter=filter_type,
interpolation=interpolation_type)
delta = np.mean(np.abs(image - reconstructed))
print('\n\tmean error:', delta)
if debug:
_debug_plot(image, reconstructed)
if filter_type in ('ramp', 'shepp-logan'):
if interpolation_type == 'nearest':
allowed_delta = 0.03
else:
allowed_delta = 0.025
else:
allowed_delta = 0.05
assert delta < allowed_delta
def test_radon_iradon():
filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"]
interpolation_types = ['linear', 'nearest']
for interpolation_type, filter_type in \
itertools.product(interpolation_types, filter_types):
yield check_radon_iradon, interpolation_type, filter_type
# cubic interpolation is slow; only run one test for it
yield check_radon_iradon, 'cubic', 'shepp-logan'
def test_iradon_angles():
"""
Test with different number of projections
"""
size = 100
# Synthetic data
image = np.tri(size) + np.tri(size)[::-1]
# Large number of projections: a good quality is expected
nb_angles = 200
radon_image_200 = radon(image, theta=np.linspace(0, 180, nb_angles,
endpoint=False))
reconstructed = iradon(radon_image_200)
delta_200 = np.mean(abs(_rescale_intensity(image) - _rescale_intensity(reconstructed)))
assert delta_200 < 0.03
# Lower number of projections
nb_angles = 80
radon_image_80 = radon(image, theta=np.linspace(0, 180, nb_angles,
endpoint=False))
# Test whether the sum of all projections is approximately the same
s = radon_image_80.sum(axis=0)
assert np.allclose(s, s[0], rtol=0.01)
reconstructed = iradon(radon_image_80)
delta_80 = np.mean(abs(image / np.max(image) -
reconstructed / np.max(reconstructed)))
# Loss of quality when the number of projections is reduced
assert delta_80 > delta_200
def check_radon_iradon_minimal(shape, slices):
debug = False
theta = np.arange(180)
image = np.zeros(shape, dtype=np.float)
image[slices] = 1.
sinogram = radon(image, theta)
reconstructed = iradon(sinogram, theta)
print('\n\tMaximum deviation:', np.max(np.abs(image - reconstructed)))
if debug:
_debug_plot(image, reconstructed, sinogram)
if image.sum() == 1:
assert (np.unravel_index(np.argmax(reconstructed), image.shape)
== np.unravel_index(np.argmax(image), image.shape))
def test_radon_iradon_minimal():
shapes = [(3, 3), (4, 4), (5, 5)]
for shape in shapes:
c0, c1 = shape[0] // 2, shape[1] // 2
coordinates = itertools.product((c0 - 1, c0, c0 + 1),
(c1 - 1, c1, c1 + 1))
for coordinate in coordinates:
yield check_radon_iradon_minimal, shape, coordinate
def test_reconstruct_with_wrong_angles():
a = np.zeros((3, 3))
p = radon(a, theta=[0, 1, 2])
iradon(p, theta=[0, 1, 2])
assert_raises(ValueError, iradon, p, theta=[0, 1, 2, 3])
def _random_circle(shape):
# Synthetic random data, zero outside reconstruction circle
np.random.seed(98312871)
image = np.random.rand(*shape)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image[r > radius] = 0.
return image
def test_radon_circle():
a = np.ones((10, 10))
assert_raises(ValueError, radon, a, circle=True)
# Synthetic data, circular symmetry
shape = (61, 79)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image = np.clip(radius - r, 0, np.inf)
image = _rescale_intensity(image)
angles = np.linspace(0, 180, min(shape), endpoint=False)
sinogram = radon(image, theta=angles, circle=True)
assert np.all(sinogram.std(axis=1) < 1e-2)
# Synthetic data, random
image = _random_circle(shape)
sinogram = radon(image, theta=angles, circle=True)
mass = sinogram.sum(axis=0)
average_mass = mass.mean()
relative_error = np.abs(mass - average_mass) / average_mass
print(relative_error.max(), relative_error.mean())
assert np.all(relative_error < 3.2e-3)
def check_sinogram_circle_to_square(size):
from skimage.transform.radon_transform import _sinogram_circle_to_square
image = _random_circle((size, size))
theta = np.linspace(0., 180., size, False)
sinogram_circle = radon(image, theta, circle=True)
argmax_shape = lambda a: np.unravel_index(np.argmax(a), a.shape)
print('\n\targmax of circle:', argmax_shape(sinogram_circle))
sinogram_square = radon(image, theta, circle=False)
print('\targmax of square:', argmax_shape(sinogram_square))
sinogram_circle_to_square = _sinogram_circle_to_square(sinogram_circle)
print('\targmax of circle to square:',
argmax_shape(sinogram_circle_to_square))
error = abs(sinogram_square - sinogram_circle_to_square)
print(np.mean(error), np.max(error))
assert (argmax_shape(sinogram_square)
== argmax_shape(sinogram_circle_to_square))
def test_sinogram_circle_to_square():
for size in (50, 51):
yield check_sinogram_circle_to_square, size
def check_radon_iradon_circle(interpolation, shape, output_size):
# Forward and inverse radon on synthetic data
image = _random_circle(shape)
radius = min(shape) // 2
sinogram_rectangle = radon(image, circle=False)
reconstruction_rectangle = iradon(sinogram_rectangle,
output_size=output_size,
interpolation=interpolation,
circle=False)
sinogram_circle = radon(image, circle=True)
reconstruction_circle = iradon(sinogram_circle,
output_size=output_size,
interpolation=interpolation,
circle=True)
# Crop rectangular reconstruction to match circle=True reconstruction
width = reconstruction_circle.shape[0]
excess = int(np.ceil((reconstruction_rectangle.shape[0] - width) / 2))
s = np.s_[excess:width + excess, excess:width + excess]
reconstruction_rectangle = reconstruction_rectangle[s]
# Find the reconstruction circle, set reconstruction to zero outside
c0, c1 = np.ogrid[0:width, 0:width]
r = np.sqrt((c0 - width // 2)**2 + (c1 - width // 2)**2)
reconstruction_rectangle[r > radius] = 0.
print(reconstruction_circle.shape)
print(reconstruction_rectangle.shape)
np.allclose(reconstruction_rectangle, reconstruction_circle)
def test_radon_iradon_circle():
shape = (61, 79)
interpolations = ('nearest', 'linear')
output_sizes = (None, min(shape), max(shape), 97)
for interpolation, output_size in itertools.product(interpolations,
output_sizes):
yield check_radon_iradon_circle, interpolation, shape, output_size
def test_order_angles_golden_ratio():
from skimage.transform.radon_transform import order_angles_golden_ratio
np.random.seed(1231)
lengths = [1, 4, 10, 180]
for l in lengths:
theta_ordered = np.linspace(0, 180, l, endpoint=False)
theta_random = np.random.uniform(0, 180, l)
for theta in (theta_random, theta_ordered):
indices = [x for x in order_angles_golden_ratio(theta)]
# no duplicate indices allowed
assert len(indices) == len(set(indices))
@test_parallel()
def test_iradon_sart():
debug = False
image = rescale(PHANTOM, 0.8)
theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)
theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)
for theta, error_factor in ((theta_ordered, 1.),
(theta_missing_wedge, 2.)):
sinogram = radon(image, theta, circle=True)
reconstructed = iradon_sart(sinogram, theta)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration) =', delta)
assert delta < 0.02 * error_factor
reconstructed = iradon_sart(sinogram, theta, reconstructed)
delta = np.mean(np.abs(reconstructed - image))
print('delta (2 iterations) =', delta)
assert delta < 0.014 * error_factor
reconstructed = iradon_sart(sinogram, theta, clip=(0, 1))
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, clip) =', delta)
assert delta < 0.018 * error_factor
np.random.seed(1239867)
shifts = np.random.uniform(-3, 3, sinogram.shape[1])
x = np.arange(sinogram.shape[0])
sinogram_shifted = np.vstack(np.interp(x + shifts[i], x,
sinogram[:, i])
for i in range(sinogram.shape[1])).T
reconstructed = iradon_sart(sinogram_shifted, theta,
projection_shifts=shifts)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram_shifted, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, shifted sinogram) =', delta)
assert delta < 0.022 * error_factor
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
bsd-3-clause
|
treverhines/RBF
|
docs/scripts/interpolate.e.py
|
1
|
3052
|
'''
Demonstrates optimizing the smoothing parameter for the RBFInterpolant when
the data contains noise.
'''
import numpy as np
import matplotlib.pyplot as plt
from rbf.interpolate import RBFInterpolant
np.random.seed(0)
def frankes_test_function(x):
x1, x2 = x[:, 0], x[:, 1]
term1 = 0.75 * np.exp(-(9*x1-2)**2/4 - (9*x2-2)**2/4)
term2 = 0.75 * np.exp(-(9*x1+1)**2/49 - (9*x2+1)/10)
term3 = 0.5 * np.exp(-(9*x1-7)**2/4 - (9*x2-3)**2/4)
term4 = -0.2 * np.exp(-(9*x1-4)**2 - (9*x2-7)**2)
y = term1 + term2 + term3 + term4
return y
# use a 3rd order polyharmonic spline
phi = 'phs3'
# degree of the added polynomial
degree = 1
# shape parameter
epsilon = 1.0
# observation points
y = np.random.random((100, 2))
# observed values at y with noise
d = frankes_test_function(y) + np.random.normal(0.0, 0.1, len(y))
# interpolation points
x = np.mgrid[0:1:200j, 0:1:200j].reshape(2, -1).T
# number of subgroups used for k-fold cross validation
k = 5
def cross_validation(smoothing):
groups = [range(i, len(y), k) for i in range(k)]
error = 0.0
for i in range(k):
train = np.hstack([groups[j] for j in range(k) if j != i])
test = groups[i]
interp = RBFInterpolant(
y[train],
d[train],
phi=phi,
order=degree,
eps=epsilon,
sigma=smoothing
)
error += ((interp(y[test]) - d[test])**2).sum()
mse = error / len(y)
return mse
# range of smoothing values to test
test_smoothings = 10**np.linspace(-4.0, 3.0, 1000)
mses = [cross_validation(s) for s in test_smoothings]
best_mse = np.min(mses)
best_smoothing = test_smoothings[np.argmin(mses)]
print('best smoothing parameter: %.2e (MSE=%.2e)' % (best_smoothing, best_mse))
interp = RBFInterpolant(
y, d,
phi=phi,
order=degree,
eps=epsilon,
sigma=best_smoothing
)
fig, ax = plt.subplots()
ax.loglog(test_smoothings, mses, 'k-')
ax.grid(ls=':', color='k')
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel('cross validation MSE')
ax.plot(best_smoothing, best_mse, 'ko')
ax.text(
best_smoothing,
best_mse,
'(%.2e, %.2e)' % (best_smoothing, best_mse),
va='top'
)
fig.tight_layout()
fig, axs = plt.subplots(2, 1, figsize=(6, 8))
p = axs[0].tripcolor(x[:, 0], x[:, 1], interp(x))
axs[0].scatter(y[:, 0], y[:, 1], c='k', s=5)
axs[0].set_xlim(0, 1)
axs[0].set_ylim(0, 1)
axs[0].set_title(
'RBF interpolant ($\phi$=%s, degree=%s, $\lambda$=%.2e)'
% (phi, degree, best_smoothing)
)
axs[0].set_xlabel('$x_0$')
axs[0].set_ylabel('$x_1$')
axs[0].grid(ls=':', color='k')
axs[0].set_aspect('equal')
fig.colorbar(p, ax=axs[0])
error = np.abs(interp(x) - frankes_test_function(x))
p = axs[1].tripcolor(x[:, 0], x[:, 1], error)
axs[1].scatter(y[:, 0], y[:, 1], c='k', s=5)
axs[1].set_xlim(0, 1)
axs[1].set_ylim(0, 1)
axs[1].set_title('|error|')
axs[1].set_xlabel('$x_0$')
axs[1].set_ylabel('$x_1$')
axs[1].grid(ls=':', color='k')
axs[1].set_aspect('equal')
fig.colorbar(p, ax=axs[1])
fig.tight_layout()
plt.show()
|
mit
|
legacysurvey/legacypipe
|
py/legacyanalysis/overview-paper-wise-forced-phot.py
|
2
|
14128
|
import matplotlib
matplotlib.use('Agg')
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', family='serif')
import pylab as plt
from astrometry.util.fits import *
from astrometry.util.plotutils import *
import numpy as np
import fitsio
from glob import glob
from wise.allwisecat import *
plt.figure(figsize=(5,4))
plt.subplots_adjust(right=0.95, top=0.98)
# np.errstate only has an effect when used as a context manager; use np.seterr
# to silence floating-point warnings globally.
np.seterr(all='ignore')
# Read DR5 LegacySurvey catalogs
#L = fits_table('/global/homes/d/dstn/cosmo/data/legacysurvey/dr5/sweep/5.0/sweep-240p005-250p010.fits')
#fns = ['/global/homes/d/dstn/cosmo/data/legacysurvey/dr5/sweep/5.0/sweep-240p005-250p010.fits']
fns = glob('/global/project/projectdirs/cosmo/data/legacysurvey/dr5/sweep/5.0/sweep-[12]*p005-*p010.fits')
L = []
for fn in fns:
print('Reading', fn)
L.append(fits_table(fn, columns=['ra','dec','type',
'flux_g','flux_r','flux_z',
'flux_w1','flux_w2','flux_w3', 'flux_w4',
'flux_ivar_g','flux_ivar_r', 'flux_ivar_z',
'flux_ivar_w1','flux_ivar_w2',
'flux_ivar_w3', 'flux_ivar_w4',
'mw_transmission_g','mw_transmission_r',
'mw_transmission_z',
'mw_transmission_w1','mw_transmission_w2',
'mw_transmission_w3', 'mw_transmission_w4',]))
L = merge_tables(L)
print(len(L), 'LegacySurvey sources')
L.cut((L.ra > 120) * (L.ra < 250))
print('Cut to', len(L), 'in RA 120-250')
L.writeto('/global/cscratch1/sd/dstn/ls.fits')
dlo=L.dec.min()
dhi=L.dec.max()
rlo=L.ra.min()
rhi=L.ra.max()
print('RA', rlo,rhi, 'Dec', dlo,dhi)
# Read AllWISE catalog
W = []
for i,(d1,d2) in enumerate(allwise_catalog_dec_range):
if d1 < dhi and d2 > dlo:
print('Overlaps part', i+1)
catfn = '/global/homes/d/dstn/cosmo/data/wise/allwise-catalog/wise-allwise-cat-part%02i-radecmpro.fits' % (i+1)
C = fits_table(catfn)
print(len(C), 'sources')
C.cut((C.ra >= rlo) * (C.ra <= rhi) * (C.dec >= dlo) * (C.dec <= dhi))
print(len(C), 'kept')
W.append(C)
W = merge_tables(W)
print(len(W), 'AllWISE catalog sources')
W.writeto('/global/cscratch1/sd/dstn/wise.fits')
from astrometry.libkd.spherematch import match_radec
print('Matching...')
I,J,d = match_radec(W.ra, W.dec, L.ra, L.dec, 4./3600.)
print(len(I), 'matches')
from collections import Counter
CW = Counter(I)
CL = Counter(J)
K, = np.nonzero([(CW[i] == 1) and (CL[j] == 1) for i,j in zip(I,J)])
print(len(K), 'unique matches')
# Unmatched LS sources
U = np.ones(len(L), bool)
U[J] = False
# Cut to one-to-one unique matches
I = I[K]
J = J[K]
# Compute mags, un-applying the Vega-to-AB conversion factors
L.w1 = -2.5*(np.log10(L.flux_w1)-9.) - 2.699
L.w2 = -2.5*(np.log10(L.flux_w2)-9.) - 3.339
L.w3 = -2.5*(np.log10(L.flux_w3)-9.) - 5.174
L.w4 = -2.5*(np.log10(L.flux_w4)-9.) - 6.620
L.z = -2.5*(np.log10(L.flux_z)-9.)
L.r = -2.5*(np.log10(L.flux_r)-9.)
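# Note: the -2.5*(log10(flux) - 9) form corresponds to fluxes in nanomaggies
# (AB zero point of 22.5 mag); the per-band constants are the Vega-to-AB
# offsets listed in the WISEAB* header comments further below.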
L.e_r = 2.5 * np.log10(L.mw_transmission_r)
L.e_z = 2.5 * np.log10(L.mw_transmission_z)
L.e_w1 = 2.5 * np.log10(L.mw_transmission_w1)
L.e_w2 = 2.5 * np.log10(L.mw_transmission_w2)
L.e_w3 = 2.5 * np.log10(L.mw_transmission_w3)
L.e_w4 = 2.5 * np.log10(L.mw_transmission_w4)
L.is_psf = np.array([t[0]=='P' for t in L.type])
# Matched
ML = L[J]
MW = W[I]
# Unmatched
UL = L[U]
#WISEAB1 = 2.699 / WISE Vega to AB conv for band 1
#WISEAB2 = 3.339 / WISE Vega to AB conv for band 2
#WISEAB3 = 5.174 / WISE Vega to AB conv for band 3
#WISEAB4 = 6.62 / WISE Vega to AB conv for band 4
loghist(MW.w1mpro, ML.w1, 200, range=((5,19),(5,19)), hot=False, imshowargs=dict(cmap=antigray))
ax = plt.axis()
plt.plot([5,21],[5,21], 'k-', alpha=0.2)
plt.axis(ax)
plt.xlabel('AllWISE W1 mag')
plt.ylabel('Legacy Survey Forced-Photometry W1 mag')
plt.axis([ax[1],ax[0],ax[3],ax[2]])
plt.savefig('w1-matched.pdf')
plt.clf()
lo,hi = 10,23
ha=dict(range=(lo,hi), bins=150, histtype='step', color='b', log=True)
n,b,p1 = plt.hist(W.w1mpro, **ha)
n,b,p2 = plt.hist(L.w1, lw=3, alpha=0.4, **ha)
plt.legend((p1[0],p2[0]), ('AllWISE Catalog', 'LegacySurvey Forced'),
loc='lower left')
plt.xlim(lo,hi)
yl,yh = plt.ylim()
print('Plot limits:', yl,yh)
plt.ylim(10,yh)
#plt.ylim(10,1e5)
plt.xlabel('W1 mag')
plt.ylabel('Number of sources')
plt.savefig('w1-count.pdf')
plt.clf()
I = (ML.is_psf)
ha = dict(nbins=100, range=((0,2.5),(0.5,3)), doclf=False, dohot=False, imshowargs=dict(cmap=antigray),
docolorbar=False)
H,xe,ye = plothist((ML.r - ML.z)[I], (ML.z - ML.w1)[I], **ha)
plt.xlabel('r - z (mag)')
plt.ylabel('z - W1 (mag)')
#plt.title('Catalog-matched PSFs')
plt.savefig('cc-matched.pdf')
print(np.sum(H), 'matched')
# rz = (ML.r - ML.z)[I]
# zw = (ML.z - ML.w1)[I]
# print(np.sum((rz>0)*(rz<3)*(zw>0.5)*(zw<2.5)), 'Matched')
plt.clf()
I = ((UL.flux_w1 * np.sqrt(UL.flux_ivar_w1) > 3.) *
(UL.flux_r * np.sqrt(UL.flux_ivar_r ) > 5.) *
(UL.flux_z * np.sqrt(UL.flux_ivar_z ) > 5.) *
(UL.is_psf))
H,xe,ye = plothist((UL.r - UL.z)[I], (UL.z - UL.w1)[I], **ha)
plt.xlabel('r - z (mag)')
plt.ylabel('z - W1 (mag)')
plt.savefig('cc-unmatched.pdf')
#plt.title('LegacySurvey PSF without AllWISE counterparts')
#plt.title('Additional faint PSF sources')
print(np.sum(H), 'matched')
# rz = (UL.r - UL.z)[I]
# zw = (UL.z - UL.w1)[I]
# print(np.sum((rz>0)*(rz<3)*(zw>0.5)*(zw<2.5)), 'Unmatched')
# plt.savefig('cc.png')
# loghist(ML.z - ML.w1, ML.w1 - ML.w2, 200, range=((-1,5),(-1,5)), hot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('z - W1 (mag)')
# plt.ylabel('W1 - W2 (mag)')
#
# loghist((ML.z - ML.w1)[ML.is_psf], (ML.w1 - ML.w2)[ML.is_psf], 200, range=((-1,5),(-1,5)), hot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('z - W1 (mag)')
# plt.ylabel('W1 - W2 (mag)')
# plt.title('LegacySurvey PSFs matched to AllWISE catalog')
#
# plothist((ML.z - ML.w1)[ML.is_psf], (ML.w1 - ML.w2)[ML.is_psf], 200, range=((0.5,3),(-0.5,0.5)), dohot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('z - W1 (mag)')
# plt.ylabel('W1 - W2 (mag)')
# plt.title('LegacySurvey PSFs (matched to AllWISE catalog)')
#
# I = np.logical_not(ML.is_psf)
# plothist((ML.z - ML.w1)[I], (ML.w1 - ML.w2)[I], 200, range=((0.5,3),(-0.5,0.5)), dohot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('z - W1 (mag)')
# plt.ylabel('W1 - W2 (mag)')
# plt.title('LegacySurvey NON-PSFs (matched to AllWISE catalog)')
#
# plt.subplot(1,2,1)
# I = ML.is_psf
# plothist((ML.z - ML.w1)[I], (ML.w1 - ML.w2)[I], 200, range=((0.5,3),(-0.5,0.5)), doclf=False, dohot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('z - W1 (mag)')
# plt.ylabel('W1 - W2 (mag)')
# plt.title('LegacySurvey PSFs (matched to AllWISE catalog)')
#
# plt.subplot(1,2,2)
# I = np.logical_not(ML.is_psf)
# plothist((ML.z - ML.w1)[I], (ML.w1 - ML.w2)[I], 200, range=((0.5,3),(-0.5,0.5)), doclf=False, dohot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('z - W1 (mag)')
# plt.ylabel('W1 - W2 (mag)')
# plt.title('LegacySurvey NON-PSFs (matched to AllWISE catalog)')
# I = ((UL.flux_w1 * np.sqrt(UL.flux_ivar_w1) > 3.) *
# (UL.flux_w2 * np.sqrt(UL.flux_ivar_w2) > 3.) *
# (UL.flux_z * np.sqrt(UL.flux_ivar_z ) > 3.) *
# (UL.is_psf))
# plothist((UL.z - UL.w1)[I], (UL.w1 - UL.w2)[I], 200, range=((0.5,3),(-0.5,0.5)), dohot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('z - W1 (mag)')
# plt.ylabel('W1 - W2 (mag)')
# plt.title('LegacySurvey PSFs (UNmatched to AllWISE catalog)')
#
#
# # In[86]:
#
# plothist((L.z - L.w1)[L.is_psf], (L.w1 - L.w2)[L.is_psf], 200, range=((0.5,3),(-0.5,0.5)), dohot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('z - W1 (mag)')
# plt.ylabel('W1 - W2 (mag)')
# plt.title('LegacySurvey PSFs (all)')
#
#
# # In[70]:
#
# plothist((L.z - L.w1), (L.w1 - L.w2), 200, range=((0.5,3),(-0.5,0.5)), dohot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('z - W1 (mag)')
# plt.ylabel('W1 - W2 (mag)')
# plt.title('LegacySurvey (all)')
#
#
# # In[58]:
#
# I = L.is_psf
# loghist((L.z - L.w1)[I], (L.w1 - L.w2)[I], 200, range=((-1,5),(-1,5)), hot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('z - W1 (mag)')
# plt.ylabel('W1 - W2 (mag)')
#
#
# # In[125]:
#
# plt.hist(ML.flux_w1 * np.sqrt(ML.flux_ivar_w1), range=(0,100), bins=100, histtype='step', color='b', log=True);
# plt.hist(L.flux_w1 * np.sqrt(L.flux_ivar_w1), range=(0,100), bins=100, histtype='step', color='k', log=True);
# plt.hist(UL.flux_w1 * np.sqrt(UL.flux_ivar_w1), range=(0,100), bins=100, histtype='step', color='r', log=True);
#
#
# # In[ ]:
#
#
#
#
# # In[122]:
#
# plt.hist(ML.w1, range=(10,20), bins=100, histtype='step', color='b', log=True);
# plt.hist(L.w1 , range=(10,20), bins=100, histtype='step', color='k', log=True);
# plt.hist(UL.w1 , range=(10,20), bins=100, histtype='step', color='r', log=True);
# yl,yh = plt.ylim()
# plt.ylim(1,yh);
#
#
# # In[60]:
#
# I = ML.is_psf
# plt.hist(ML.flux_w1[I] * np.sqrt(ML.flux_ivar_w1[I]), range=(0,20), bins=100, histtype='step', color='g');
# plt.hist(ML.flux_w2[I] * np.sqrt(ML.flux_ivar_w2[I]), range=(0,20), bins=100, histtype='step', color='r');
# plt.hist(ML.flux_z[I] * np.sqrt(ML.flux_ivar_z [I]), range=(0,20), bins=100, histtype='step', color='b');
# plt.xlabel('S/N');
#
#
# # In[130]:
#
# plt.subplot(1,2,1)
# I = (ML.is_psf)
# plothist((ML.r - ML.z)[I], (ML.z - ML.w1)[I], 200, range=((0,3),(0.5,2.5)), doclf=False, dohot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('r - z (mag)')
# plt.ylabel('z - W1 (mag)')
# plt.title('LegacySurvey PSFs (matched to AllWISE catalog)')
#
# rz = (ML.r - ML.z)[I]
# zw = (ML.z - ML.w1)[I]
# print(np.sum((rz>0)*(rz<3)*(zw>0.5)*(zw<2.5)), 'Matched')
#
# plt.subplot(1,2,2)
# I = ((UL.flux_w1 * np.sqrt(UL.flux_ivar_w1) > 5.) *
# (UL.flux_r * np.sqrt(UL.flux_ivar_r ) > 5.) *
# (UL.flux_z * np.sqrt(UL.flux_ivar_z ) > 5.) *
# (UL.is_psf))
# #I = UL.is_psf
# plothist((UL.r - UL.z)[I], (UL.z - UL.w1)[I], 200, range=((0,3),(0.5,2.5)), doclf=False, dohot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('r-z (mag)')
# plt.ylabel('z-W1 (mag)')
# plt.title('LegacySurvey PSFs (UNmatched to AllWISE catalog)')
#
# rz = (UL.r - UL.z)[I]
# zw = (UL.z - UL.w1)[I]
# print(np.sum((rz>0)*(rz<3)*(zw>0.5)*(zw<2.5)), 'Unmatched')
#
# plt.savefig('cc.png')
#
#
# # In[127]:
#
# plt.subplot(1,2,1)
# I = (ML.is_psf)
# plothist((ML.r - ML.z)[I], (ML.z - (ML.w1+ML.w2)/2.)[I], 200, range=((0,3),(0.5,2.5)), doclf=False, dohot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('r - z (mag)')
# plt.ylabel('z - W (mag)')
# plt.title('LegacySurvey PSFs (matched to AllWISE catalog)')
#
# plt.subplot(1,2,2)
# I = ((UL.flux_w1 * np.sqrt(UL.flux_ivar_w1) > 3.) *
# (UL.flux_r * np.sqrt(UL.flux_ivar_r ) > 3.) *
# (UL.flux_z * np.sqrt(UL.flux_ivar_z ) > 3.) *
# (UL.is_psf))
# #I = UL.is_psf
# plothist((UL.r - UL.z)[I], (UL.z - (UL.w1+UL.w2)/2.)[I], 200, range=((0,3),(0.5,2.5)), doclf=False, dohot=False, imshowargs=dict(cmap=antigray));
# plt.xlabel('r - z (mag)')
# plt.ylabel('z - W (mag)')
# plt.title('LegacySurvey PSFs (UNmatched to AllWISE catalog)')
#plt.subplot(1,2,1)
if False:
plt.clf()
ha = dict(nbins=100, range=((-0.5,3),(0,3)), doclf=False, hot=False, imshowargs=dict(cmap=antigray))
I = (ML.is_psf)
loghist((ML.r - ML.z)[I], (ML.z - ML.w1)[I], **ha)
plt.xlabel('r - z (mag)')
plt.ylabel('z - W1 (mag)')
#plt.title('LegacySurvey PSFs matched to AllWISE catalog')
plt.savefig('cc-matched.pdf')
rz = (ML.r - ML.z)[I]
zw = (ML.z - ML.w1)[I]
print(np.sum((rz>0)*(rz<3)*(zw>0.5)*(zw<2.5)), 'Matched')
plt.clf()
ha.update(imshowargs=dict(cmap=antigray, vmax=np.log10(3000)))
I = ((UL.flux_w1 * np.sqrt(UL.flux_ivar_w1) > 3.) *
(UL.flux_r * np.sqrt(UL.flux_ivar_r ) > 3.) *
(UL.flux_z * np.sqrt(UL.flux_ivar_z ) > 3.) *
(UL.is_psf))
loghist((UL.r - UL.z)[I], (UL.z - UL.w1)[I], **ha)
plt.xlabel('r - z (mag)')
plt.ylabel('z - W1 (mag)')
#plt.title('LegacySurvey PSFs unmatched to AllWISE catalog')
plt.savefig('cc-unmatched.pdf')
rz = (UL.r - UL.z)[I]
zw = (UL.z - UL.w1)[I]
print(np.sum((rz>0)*(rz<3)*(zw>0.5)*(zw<2.5)), 'Unmatched')
plt.clf()
ha = dict(nbins=100, range=((-0.5,3),(0,3)), doclf=False, hot=False, imshowargs=dict(cmap=antigray))
I = (ML.is_psf)
rz = ((ML.r-ML.e_r) - (ML.z-ML.e_z))[I]
zw = ((ML.z-ML.e_z) - (ML.w1-ML.e_w1))[I]
loghist(rz, zw, **ha)
plt.xlabel('r - z (mag)')
plt.ylabel('z - W1 (mag)')
#plt.title('LegacySurvey PSFs matched to AllWISE catalog')
plt.savefig('cc-matched2.pdf')
print(np.sum((rz>0)*(rz<3)*(zw>0.5)*(zw<2.5)), 'Matched')
plt.clf()
ha.update(imshowargs=dict(cmap=antigray, vmax=np.log10(3000)))
I = ((UL.flux_w1 * np.sqrt(UL.flux_ivar_w1) > 3.) *
(UL.flux_r * np.sqrt(UL.flux_ivar_r ) > 3.) *
(UL.flux_z * np.sqrt(UL.flux_ivar_z ) > 3.) *
(UL.is_psf))
rz = ((UL.r-UL.e_r) - (UL.z-UL.e_z))[I]
zw = ((UL.z-UL.e_z) - (UL.w1-UL.e_w1))[I]
loghist(rz, zw, **ha)
plt.xlabel('r - z (mag)')
plt.ylabel('z - W1 (mag)')
#plt.title('LegacySurvey PSFs unmatched to AllWISE catalog')
plt.savefig('cc-unmatched2.pdf')
print(np.sum((rz>0)*(rz<3)*(zw>0.5)*(zw<2.5)), 'Unmatched')
plt.clf()
ha = dict(nbins=200, range=((-5,10),(13,25)), doclf=False, hot=False, imshowargs=dict(cmap=antigray, vmax=4.))
I = (ML.is_psf)
loghist((ML.r - ML.w1)[I], ML.r[I], **ha)
plt.xlabel('r - W1 (mag)')
plt.ylabel('r (mag)')
#plt.title('LegacySurvey PSFs (matched to AllWISE catalog)')
plt.savefig('cm-matched.pdf')
plt.clf()
I = (#(L.flux_w1 * np.sqrt(L.flux_ivar_w1) > 3.) *
#(L.flux_r * np.sqrt(L.flux_ivar_r ) > 3.) *
#(L.flux_z * np.sqrt(L.flux_ivar_z ) > 3.) *
(L.is_psf))
loghist((L.r - L.w1)[I], L.r[I], **ha)
plt.xlabel('r - W1 (mag)')
plt.ylabel('r (mag)')
plt.savefig('cm-all.pdf')
|
bsd-3-clause
|
edhuckle/statsmodels
|
statsmodels/formula/tests/test_formula.py
|
29
|
4647
|
from statsmodels.compat.python import iteritems, StringIO
import warnings
from statsmodels.formula.api import ols
from statsmodels.formula.formulatools import make_hypotheses_matrices
from statsmodels.tools import add_constant
from statsmodels.datasets.longley import load, load_pandas
import numpy.testing as npt
from statsmodels.tools.testing import assert_equal
from numpy.testing.utils import WarningManager
longley_formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
class CheckFormulaOLS(object):
@classmethod
def setupClass(cls):
cls.data = load()
def test_endog_names(self):
assert self.model.endog_names == 'TOTEMP'
def test_exog_names(self):
assert self.model.exog_names == ['Intercept', 'GNPDEFL', 'GNP',
'UNEMP', 'ARMED', 'POP', 'YEAR']
def test_design(self):
npt.assert_equal(self.model.exog,
add_constant(self.data.exog, prepend=True))
def test_endog(self):
npt.assert_equal(self.model.endog, self.data.endog)
def test_summary(self):
# smoke test
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.filterwarnings("ignore",
"kurtosistest only valid for n>=20")
self.model.fit().summary()
finally:
warn_ctx.__exit__()
class TestFormulaPandas(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load_pandas().data
cls.model = ols(longley_formula, data)
super(TestFormulaPandas, cls).setupClass()
class TestFormulaDict(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = dict((k, v.tolist()) for k, v in iteritems(load_pandas().data))
cls.model = ols(longley_formula, data)
super(TestFormulaDict, cls).setupClass()
class TestFormulaRecArray(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load().data
cls.model = ols(longley_formula, data)
super(TestFormulaRecArray, cls).setupClass()
def test_tests():
formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
dta = load_pandas().data
results = ols(formula, dta).fit()
test_formula = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
LC = make_hypotheses_matrices(results, test_formula)
R = LC.coefs
Q = LC.constants
npt.assert_almost_equal(R, [[0, 1, -1, 0, 0, 0, 0],
[0, 0 , 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1./1829]], 8)
npt.assert_array_equal(Q, [[0],[2],[1]])
def test_formula_labels():
# make sure labels pass through patsy as expected
# data(Duncan) from car in R
dta = StringIO(""""type" "income" "education" "prestige"\n"accountant" "prof" 62 86 82\n"pilot" "prof" 72 76 83\n"architect" "prof" 75 92 90\n"author" "prof" 55 90 76\n"chemist" "prof" 64 86 90\n"minister" "prof" 21 84 87\n"professor" "prof" 64 93 93\n"dentist" "prof" 80 100 90\n"reporter" "wc" 67 87 52\n"engineer" "prof" 72 86 88\n"undertaker" "prof" 42 74 57\n"lawyer" "prof" 76 98 89\n"physician" "prof" 76 97 97\n"welfare.worker" "prof" 41 84 59\n"teacher" "prof" 48 91 73\n"conductor" "wc" 76 34 38\n"contractor" "prof" 53 45 76\n"factory.owner" "prof" 60 56 81\n"store.manager" "prof" 42 44 45\n"banker" "prof" 78 82 92\n"bookkeeper" "wc" 29 72 39\n"mail.carrier" "wc" 48 55 34\n"insurance.agent" "wc" 55 71 41\n"store.clerk" "wc" 29 50 16\n"carpenter" "bc" 21 23 33\n"electrician" "bc" 47 39 53\n"RR.engineer" "bc" 81 28 67\n"machinist" "bc" 36 32 57\n"auto.repairman" "bc" 22 22 26\n"plumber" "bc" 44 25 29\n"gas.stn.attendant" "bc" 15 29 10\n"coal.miner" "bc" 7 7 15\n"streetcar.motorman" "bc" 42 26 19\n"taxi.driver" "bc" 9 19 10\n"truck.driver" "bc" 21 15 13\n"machine.operator" "bc" 21 20 24\n"barber" "bc" 16 26 20\n"bartender" "bc" 16 28 7\n"shoe.shiner" "bc" 9 17 3\n"cook" "bc" 14 22 16\n"soda.clerk" "bc" 12 30 6\n"watchman" "bc" 17 25 11\n"janitor" "bc" 7 20 8\n"policeman" "bc" 34 47 41\n"waiter" "bc" 8 32 10""")
from pandas import read_table
dta = read_table(dta, sep=" ")
model = ols("prestige ~ income + education", dta).fit()
assert_equal(model.fittedvalues.index, dta.index)
def test_formula_predict():
from numpy import log
formula = """TOTEMP ~ log(GNPDEFL) + log(GNP) + UNEMP + ARMED +
POP + YEAR"""
data = load_pandas()
dta = load_pandas().data
results = ols(formula, dta).fit()
npt.assert_almost_equal(results.fittedvalues.values,
results.predict(data.exog), 8)
|
bsd-3-clause
|
HRClab/SimInterace
|
SimInterface/System.py
|
2
|
13623
|
"""
The fundamental objects of the SimInterface are systems.
"""
try:
import graphviz as gv
graphviz = True
except ImportError:
graphviz = False
import pandas as pd
import numpy as np
import collections as col
import Variable as Var
import inspect as ins
def castToTuple(Vars):
if Vars is None:
return tuple()
elif isinstance(Vars,col.Iterable):
        # Preserve ordering: callers index StateVars/OutputVars positionally
        return tuple(Vars)
else:
return (Vars,)
def castToSet(S):
if isinstance(S,set):
return S
elif isinstance(S,col.Iterable):
return set(S)
elif S is None:
return set()
else:
return set([S])
class System:
"""
    A better solution might be obtained by simply forcing
    StateFunc and OutputFuncs to be function objects, so that
    the variables would just inherit.
"""
def __init__(self,Funcs=set(),label=''):
self.label=label
self.__buildSystem(Funcs)
def add(self,func):
Funcs = self.Funcs | set([func])
self.__buildSystem(Funcs)
def update(self,NewFuncs):
NewFuncSet = castToSet(NewFuncs)
Funcs = self.Funcs | NewFuncSet
self.__buildSystem(Funcs)
def __buildSystem(self,Funcs):
self.Funcs = castToSet(Funcs)
# Get all the variables
self.Vars = reduce(lambda a,b : a|b,
[f.Vars for f in self.Funcs],
set())
# We will now build an execution order for the output functions
Parents = dict()
Children = dict()
Executable = col.deque()
self.ExecutionOrder = []
for f in self.Funcs:
Parents[f] = set(v.Source for v in f.InputVars) & self.Vars
if (len(Parents[f]) == 0) and (isinstance(f,StaticFunction)):
# If a function has no parents it is executable immediately
# Only put static functions in the execution order
Executable.append(f)
if f in Parents[f]:
                # A function must not depend directly on its own outputs
                raise ValueError('Not well-posed: function depends on itself')
        # For convenience we also construct the inverse dictionary:
        # for each function, the set of functions that it is used to produce
Children = {f : set() for f in self.Funcs}
for f in self.Funcs:
for g in Children[f]:
Children[g].union(set(f))
# Now finally we create the execution order
while len(Executable) > 0:
f = Executable.pop()
self.ExecutionOrder.append(f)
for child in Children[f]:
Parents[child].remove(f)
if (len(Parents[child]) == 0) and \
(isinstance(child,StaticFunction)):
Executable.append(child)
# Build a dictionary from labels to current values
self.labelToValue = {v.label : np.array(v.data.iloc[0]) \
for v in self.Vars}
##### Things needed for Vector Field ######
self.StateFuncs = [f for f in self.Funcs if len(f.StateVars)>0]
StateVarSet = reduce(lambda a,b : a|b,
[set(f.StateVars) for f in self.Funcs],
set())
self.StateVars = list(StateVarSet)
self.stateToFunc = {v : [] for v in self.StateVars}
for f in self.StateFuncs:
for v in f.StateVars:
self.stateToFunc[v].append(f)
        # Create auxiliary states for exogenous signals
self.InputSignals = [v for v in self.Vars if \
(v.Source not in self.Funcs) and \
(isinstance(v,Var.Signal))]
self.IndexSlopes = []
for v in self.InputSignals:
TimeIndex = np.array(v.data.index.levels[1])
slopeList = 1./np.diff(TimeIndex)
self.IndexSlopes.append(slopeList)
##### Initial Condition for ODE Integration ######
Dimensions = [0]
Dimensions.extend([v.data.shape[1] for v in self.StateVars])
self.StateIndexBounds = np.cumsum(Dimensions)
NumStates = len(self.StateVars)
self.InitialState = np.zeros(self.StateIndexBounds[-1] + \
len(self.InputSignals))
for k in range(NumStates):
InitVal = np.array(self.StateVars[k].data.iloc[0])
indLow,indHigh = self.StateIndexBounds[k:k+2]
self.InitialState[indLow:indHigh] = InitVal
self.__createGraph()
def UpdateParameters(self):
Parameters = [v for v in self.Vars if isinstance(v,Var.Parameter)]
for v in Parameters:
self.labelToValue[v.label] = np.array(v.data.iloc[0])
def UpdateSignals(self,Time=[],State=[]):
T = len(Time)
Signals = set([v for v in self.Vars if isinstance(v,Var.Signal)])
InternalSignals = Signals - set(self.InputSignals)
NewData = {v : np.zeros((T,v.data.shape[1])) \
for v in InternalSignals}
for k in range(len(Time)):
t = Time[k]
S = State[k]
self.__setSignalValues(t,S)
for v in InternalSignals:
NewData[v][k] = self.labelToValue[v.label]
for v in InternalSignals:
# Need a reset
v.setData(NewData[v],Time)
def __setSignalValues(self,Time,State):
# Set State Values
NumStates = len(self.StateVars)
for k in range(NumStates):
v = self.StateVars[k]
indLow,indHigh = self.StateIndexBounds[k:k+2]
curVal = State[indLow:indHigh]
self.labelToValue[v.label] = curVal
# Set Input Signal Values
NumIndexStates = len(self.InputSignals)
IndexStateList = State[-NumIndexStates:]
for k in range(NumIndexStates):
ctsIndex = IndexStateList[k]
curInd = int(np.floor(ctsIndex))
nextInd = curInd+1
if nextInd < len(self.IndexSlopes[k]):
v = self.InputSignals[k]
                # Linearly interpolate exogenous inputs; this should
                # improve smoothness and is cheap to do.
prevInput = v.data.iloc[curInd]
nextInput = v.data.iloc[nextInd]
lam = IndexStateList[k] - curInd
# this can be called later.
inputVal = (1-lam) * prevInput + lam * nextInput
self.labelToValue[v.label] = np.array(inputVal)
else:
# If out of bounds just stay at the last value.
self.labelToValue[v.label] = np.array(v.data.iloc[-1])
# Set Intermediate Signal Values
for f in self.ExecutionOrder:
argList = ins.getargspec(f.func)[0]
valList = [self.labelToValue[lab] for lab in argList]
outTup = f.func(*valList)
if len(f.OutputVars) > 1:
                for k in range(len(f.OutputVars)):
outVariable = f.OutputVars[k]
outValue = outTup[k]
self.labelToValue[outVariable.label] = outValue
else:
self.labelToValue[f.OutputVars[0].label] = outTup
def VectorField(self,Time,State):
"""
Something suitable for passing to ODE methods.
"""
State_dot = np.zeros(len(State))
self.__setSignalValues(Time,State)
NumStates = len(self.StateVars)
# Apply the vector fields
## Compute
NumIndexStates = len(self.InputSignals)
IndexStateList = State[-NumIndexStates:]
IndexSlopes = np.zeros(NumIndexStates)
for k in range(NumIndexStates):
ctsIndex = IndexStateList[k]
curInd = int(np.floor(ctsIndex))
nextInd = curInd+1
if nextInd < len(self.IndexSlopes[k]):
# Not too near end
IndexSlopes[k] = self.IndexSlopes[k][curInd]
else:
# If out of bounds just stay at the last value.
IndexSlopes[k] = 0.
## Plug in the derivative of the index slopes.
State_dot[-NumIndexStates:] = IndexSlopes
## Compute main vector field
dvdt = {v : np.zeros(v.data.shape[1]) for v in self.StateVars}
for f in self.StateFuncs:
argList = ins.getargspec(f.func)[0]
valList = [self.labelToValue[lab] for lab in argList]
dxdt = f.func(*valList)
nx = len(f.StateVars)
# output may or may not be a tuple.
if nx > 1:
for k in range(nx):
dvdt[f.StateVars[k]] += dxdt[k]
else:
dvdt[f.StateVars[0]] += dxdt
for k in range(NumStates):
indLow,indHigh = self.StateIndexBounds[k:k+2]
State_dot[indLow:indHigh] = dvdt[self.StateVars[k]]
return State_dot
def __createGraph(self):
"""
Create a graph using the graphviz module.
It may be advisable to make this a bit more separated.
Namely, make a separate add-on that you pass the system to and it
would produce a graph.
Basically make a separate submodule called "SystemGraph"
"""
if not graphviz:
self.graph = None
return
dot = gv.Digraph(name=self.label)
# Ignore the integrators
NonIntegrators = set([f for f in self.Funcs if f.ftype != 'integrator'])
for f in NonIntegrators:
dot.node(f.label,shape='box')
# Handle state vars and nonstate vars separately
StateVars = set(self.StateVars)
NonState = self.Vars - StateVars
for v in self.Vars:
if (v.Source not in NonIntegrators) and (v in NonState):
dot.node(v.label,shape='plaintext')
for tar in (set(v.Targets) & NonIntegrators):
dot.edge(v.label,tar.label)
else:
for tar in (set(v.Targets) & NonIntegrators):
if v.Source in NonIntegrators:
dot.edge(v.Source.label,tar.label,label=v.label)
if len(set(v.Targets) & self.Funcs) == 0:
dot.node(v.label,shape='plaintext')
if v.Source in NonIntegrators:
dot.edge(v.Source.label,v.label)
# Special handling for states
for v in StateVars:
for f in self.stateToFunc[v]:
for g in (v.Targets & NonIntegrators):
dot.edge(f.label,g.label,label=v.label)
self.graph = dot
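# A minimal integration sketch (an assumption, not part of the original module):
# System.VectorField(Time, State) follows the f(t, y) calling convention of
# scipy.integrate.ode, so given a built System instance `sys` and user-chosen
# t_final and dt, a simulation loop might look like
#
#   from scipy.integrate import ode
#   solver = ode(sys.VectorField).set_integrator('dopri5')
#   solver.set_initial_value(sys.InitialState, 0.0)
#   while solver.successful() and solver.t < t_final:
#       solver.integrate(solver.t + dt)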
def Connect(Systems=set(),label='Sys'):
Funcs = reduce(lambda a,b : a | b, [S.Funcs for S in Systems],set())
return System(Funcs,label)
class Function(System):
def __init__(self,func=lambda : None,label='Fun',
StateVars = tuple(),
InputVars = tuple(),
OutputVars = tuple(),
ftype=None):
self.func = func
self.label = label
self.ftype = ftype
# In general, ordering of variables matters.
        # This is especially true when parsing outputs
self.StateVars = castToTuple(StateVars)
self.InputVars = castToTuple(InputVars)
self.OutputVars = castToTuple(OutputVars)
# Sometimes, however, we only care about
# an un-ordered set
StateSet = set(self.StateVars)
InputSet = set(self.InputVars)
OutputSet = set(self.OutputVars)
self.Vars = StateSet | InputSet | OutputSet
map(lambda v : v.Targets.add(self),StateSet | InputSet)
for v in OutputSet:
v.Source = self
System.__init__(self,self,label)
class StaticFunction(Function):
def __init__(self,func=None,InputVars=None,OutputVars=None,label='Fun'):
Function.__init__(self,func=func,label=label,ftype='static',
InputVars=InputVars,OutputVars=OutputVars)
class DifferentialEquation(System):
def __init__(self,func=None,StateVars=None,InputVars=None,
Time=None,label='DiffEq'):
# Dummy signals for the time derivatives
# These "outputs" are fed into a dummy integrator function
OutputVars = []
StateVars = castToTuple(StateVars)
# Make an ordered list of derivative variables corresponding
# to the state variables.
for v in StateVars:
dvdt = Var.Signal(label='d%s/dt' % v.label,
data=np.zeros((1,v.data.shape[1])),
TimeStamp=np.zeros(1))
OutputVars.append(dvdt)
VectorField = Function(func=func,label=label,
InputVars=InputVars,
OutputVars=OutputVars,
StateVars=StateVars,
ftype='vector_field')
Integrator = Function(label='Integrator',
InputVars=OutputVars,
OutputVars=StateVars,
ftype='integrator')
System.__init__(self,
Funcs=set([VectorField,Integrator]),label=label)
|
mit
|
cavestruz/MLPipeline
|
gridsearch/model_grid_search.py
|
1
|
1194
|
import sys
from sklearn.cross_validation import train_test_split
from MLPipeline.IO.config_parser import parse_configfile
from MLPipeline.IO.collect_classes import get_two_classes
from _tools import build_parameter_grid, grid_search
from MLPipeline.pipeline_tools.build_pipeline import build_pipeline
import time
cfg = parse_configfile(sys.argv[1])
start_time = time.time()
# Collect data and labels
X, y = get_two_classes(cfg['filenames']['non_lens_glob'],
cfg['filenames']['lens_glob'])
assert( len(X) == len(y) )
assert( len(X) > 0 )
# Split the dataset and labels into training and testing subsets
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2 )
assert( len(X_train) == len(y_train) )
assert( len(X_test) == len(y_test) )
assert( len(X_train) > 0 )
assert( len(X_test) > 0 )
# Build the parameter grid
param_grid = build_parameter_grid(cfg['param_grid'])
# Build the pipeline
pipeline = build_pipeline(cfg['image_processing'].values(),
cfg['classifier']['label'])
# Perform the grid search
grid_search(pipeline, param_grid, X_train, y_train, X_test, y_test)
print 'Time Taken:', time.time()-start_time
|
mit
|
evanthebouncy/nnhmm
|
battleship_lstm_policy/draw.py
|
3
|
3272
|
import numpy as np
import matplotlib.pylab as plt
import multiprocessing as mp
from matplotlib import figure
# m = [[0.0, 1.47, 2.43, 3.44, 1.08, 2.83, 1.08, 2.13, 2.11, 3.7], [1.47, 0.0, 1.5, 2.39, 2.11, 2.4, 2.11, 1.1, 1.1, 3.21], [2.43, 1.5, 0.0, 1.22, 2.69, 1.33, 3.39, 2.15, 2.12, 1.87], [3.44, 2.39, 1.22, 0.0, 3.45, 2.22, 4.34, 2.54, 3.04, 2.28], [1.08, 2.11, 2.69, 3.45, 0.0, 3.13, 1.76, 2.46, 3.02, 3.85], [2.83, 2.4, 1.33, 2.22, 3.13, 0.0, 3.83, 3.32, 2.73, 0.95], [1.08, 2.11, 3.39, 4.34, 1.76, 3.83, 0.0, 2.47, 2.44, 4.74], [2.13, 1.1, 2.15, 2.54, 2.46, 3.32, 2.47, 0.0, 1.78, 4.01], [2.11, 1.1, 2.12, 3.04, 3.02, 2.73, 2.44, 1.78, 0.0, 3.57], [3.7, 3.21, 1.87, 2.28, 3.85, 0.95, 4.74, 4.01, 3.57, 0.0]]
FIG = plt.figure()
def draw_coord(coord, name, lab=[1.0, 0.0]):
color = 1.0 if lab[0] > lab[1] else -1.0
ret = np.zeros(shape=[20,20,1])
coord_x, coord_y = coord
coord_x_idx = np.argmax(coord_x)
coord_y_idx = np.argmax(coord_y)
ret[coord_x_idx][coord_y_idx][0] = color
draw(ret, name)
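# Render a (H, W, 1) array as a grayscale heatmap and save it to `name`.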
def draw(m, name):
FIG.clf()
matrix = m
orig_shape = np.shape(matrix)
# lose the channel shape in the end of orig_shape
new_shape = orig_shape[:-1]
matrix = np.reshape(matrix, new_shape)
ax = FIG.add_subplot(1,1,1)
ax.set_aspect('equal')
plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.gray)
# plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.ocean)
plt.colorbar()
plt.savefig(name)
def draw_obs(obs, name):
ret_shape = [10, 10, 1]
ret = np.zeros(shape=ret_shape)
for ob, lab in obs:
ii, jj = ob
labb = 1.0 if lab[0] > lab[1] else -1.0
ret[ii][jj][0] = labb
draw(ret, name)
def draw_annotate(x_cords, y_cords, anns, name):
FIG.clf()
y = x_cords
z = y_cords
n = anns
fig = FIG
ax = fig.add_subplot(1,1,1)
ax.set_xlim([0,10])
ax.set_ylim([0,10])
ax.set_ylim(ax.get_ylim()[::-1])
ax.scatter(z, y)
for i, txt in enumerate(n):
ax.annotate(txt, (z[i],y[i]))
fig.savefig(name)
def draw_trace(trace, name):
x_coords = []
y_coords = []
anno = []
for i, stuff in enumerate(trace):
ob, inv = stuff
# x_coords.append(inv[0])
# y_coords.append(inv[1])
# anno.append("X"+str(i))
if ob != None:
print ob
ob_coord, ob_outcome = ob
x_coords.append(ob_coord[0])
y_coords.append(ob_coord[1])
anno.append("O"+str(i)+str(int(ob_outcome[0])))
draw_annotate(x_coords, y_coords, anno, name)
def draw_ship(ship_coords, name):
ret_shape = [10, 10, 1]
ret = np.zeros(shape=ret_shape)
for sc in ship_coords:
l = sc[0][0]
u = sc[0][1]
r = sc[1][0]
d = sc[1][1]
for i in range(l, r+1):
for j in range(u, d+1):
ret[i][j][0] = 1.0
draw(ret, name)
def draw_all_preds(all_preds, name):
ret_shape = [10, 10, 1]
ret = np.zeros(shape=ret_shape)
for qq, labb in all_preds:
i, j = qq
# ret[i][j][0] = 1.0 if labb[0] > labb[1] else 0.0
# ret[i][j][0] = labb[0]
ret[i][j][0] = 1.0 - abs(labb[0] - labb[1])
draw(ret, name)
# draw again for predictions
ret_shape = [10, 10, 1]
ret = np.zeros(shape=ret_shape)
for qq, labb in all_preds:
i, j = qq
ret[i][j][0] = labb[0]
draw(ret, name.replace("_inv", "_hypothesis_"))
|
mit
|
oliverlee/sympy
|
examples/intermediate/mplot3d.py
|
93
|
1252
|
#!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
"""
Plot a 3d function using matplotlib/Tk.
"""
import warnings
warnings.filterwarnings("ignore", "Could not match \S")
p = import_module('pylab')
# Try newer version first
p3 = import_module('mpl_toolkits.mplot3d',
__import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
if not p or not p3:
sys.exit("Matplotlib is required to use mplot3d.")
x, y, z = sample(f, var1, var2)
fig = p.figure()
ax = p3.Axes3D(fig)
# ax.plot_surface(x, y, z, rstride=2, cstride=2)
ax.plot_wireframe(x, y, z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
if show:
p.show()
def main():
x = Symbol('x')
y = Symbol('y')
mplot3d(x**2 - y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(x**2+y**2, (x, -10.0, 10.0, 20), (y, -10.0, 10.0, 20))
# mplot3d(sin(x)+sin(y), (x, -3.14, 3.14, 10), (y, -3.14, 3.14, 10))
if __name__ == "__main__":
main()
|
bsd-3-clause
|
OpenSourcePolicyCenter/taxdata
|
puf_data/StatMatch/Matching/phase2.py
|
1
|
1713
|
"""
Perform the match and evaluate the CPS variables.
Input file: soirets2009_ph1.csv, cpsrets14_ph1.csv
Output file: match.csv
"""
import numpy as np
import pandas as pd
from tqdm import tqdm
def phasetwo(SOI, CPS):
CPS.loc[:, 'wt_adj'] = CPS['wt'] * CPS['factor']
factor = 1.
if CPS['wt'].sum() > 0:
factor = SOI['wt'].sum() / float(CPS['wt'].sum())
# CPS['wt_adj'] = CPS['wt'] * factor
cellid = np.unique(SOI['cellid'].values)
soi_list = list()
cps_list = list()
cwt_list = list()
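    # Within each cell, walk the SOI and CPS records in yhat order, splitting
    # record weights so every SOI record's weight is fully allocated to one or
    # more CPS records.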
for cid in tqdm(cellid):
soi = SOI[SOI['cellid'] == cid]
cps = CPS[CPS['cellid'] == cid]
soi = soi.sort_values('yhat', kind='mergesort')
cps = cps.sort_values('yhat', kind='mergesort')
soi = soi.to_dict('records')
cps = cps.to_dict('records')
j = 0
bwt = cps[0]['wt_adj']
count = len(cps) - 1
epsilon = 0.001
for record in soi:
awt = record['wt']
while awt > epsilon:
cwt = min(awt, bwt)
soiseq = record['soiseq']
cpsseq = cps[j]['cpsseq']
soi_list.append(soiseq)
cps_list.append(cpsseq)
cwt_list.append(cwt)
awt = max(0, awt - cwt)
bwt = max(0, bwt - cwt)
if bwt <= epsilon:
if j < count:
j += 1
bwt = cps[j]['wt_adj']
match = pd.DataFrame({'soiseq': soi_list, 'cpsseq': cps_list,
'cwt': cwt_list})
return match
|
mit
|
francesco-mannella/dmp-esn
|
parametric/parametric_dmp/bin/tr_datasets/e_cursive_curves/data/trajectories/plot.py
|
18
|
1043
|
#!/usr/bin/env python
import glob
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
pathname = os.path.dirname(sys.argv[0])
if pathname:
os.chdir(pathname)
n_dim = None
trains = []
for fname in glob.glob("tl*"):
t = np.loadtxt(fname)
trains.append(t)
tests = []
for fname in glob.glob("tt*"):
t = np.loadtxt(fname)
tests.append(t)
trial_results = []
for fname in glob.glob("rtl*"):
t = np.loadtxt(fname)
trial_results.append(t)
test_results = []
for fname in glob.glob("rtt*"):
t = np.loadtxt(fname)
test_results.append(t)
fig = plt.figure()
ax = fig.add_subplot(111, aspect="equal")
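# Each trajectory plots column 1 vs column 2, offset by columns 7 and 8 (x6):
# training set in blue, test set in red, and the corresponding results
# (rtl*/rtt*) in darker shades.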
for d in trains:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color="blue", lw=3, alpha=0.5)
for d in tests:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color="red", lw=3, alpha=0.5)
for d in trial_results:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[0,0,.5], lw=2)
for d in test_results:
ax.plot(d[:,1] +d[:,7]*6, d[:,2] +d[:,8]*6, color=[.5,0,0], lw=2)
plt.show()
|
gpl-2.0
|
pgm/StarCluster
|
utils/scimage.py
|
20
|
19393
|
#!/usr/bin/env python
"""
This script is meant to be run inside of a ubuntu cloud image available at
uec-images.ubuntu.com::
$ EC2_UBUNTU_IMG_URL=http://uec-images.ubuntu.com/precise/current
$ wget $EC2_UBUNTU_IMG_URL/precise-server-cloudimg-amd64.tar.gz
or::
$ wget $EC2_UBUNTU_IMG_URL/precise-server-cloudimg-i386.tar.gz
After downloading a Ubuntu cloud image the next step is to extract the image::
$ tar xvzf precise-server-cloudimg-amd64.tar.gz
Then resize it to 10GB::
$ e2fsck -f precise-server-cloudimg-amd64.img
$ resize2fs precise-server-cloudimg-amd64.img 10G
Next you need to mount the image::
$ mkdir /tmp/img-mount
$ mount precise-server-cloudimg-amd64.img /tmp/img-mount
$ mount -t proc none /tmp/img-mount/proc
$ mount -t sysfs none /tmp/img-mount/sys
$ mount -o bind /dev /tmp/img-mount/dev
$ mount -t devpts none /tmp/img-mount/dev/pts
$ mount -o rbind /var/run/dbus /tmp/img-mount/var/run/dbus
Copy /etc/resolv.conf and /etc/mtab to the image::
$ mkdir -p /tmp/img-mount/var/run/resolvconf
$ cp /etc/resolv.conf /tmp/img-mount/var/run/resolvconf/resolv.conf
$ grep -v rootfs /etc/mtab > /tmp/img-mount/etc/mtab
Next copy this script inside the image::
$ cp /path/to/scimage.py /tmp/img-mount/root/scimage.py
Finally chroot inside the image and run this script::
$ chroot /tmp/img-mount /bin/bash
$ cd $HOME
$ python scimage.py
"""
import os
import sys
import glob
import shutil
import fileinput
import subprocess
import multiprocessing
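# The rest of this script automates those steps: constants for apt sources and
# config files, subprocess/apt helpers, install_* functions for the schedulers
# and the numerics stack, and cleanup() to strip the image back down.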
SRC_DIR = "/usr/local/src"
APT_SOURCES_FILE = "/etc/apt/sources.list"
BUILD_UTILS_PKGS = "build-essential devscripts debconf debconf-utils dpkg-dev "
BUILD_UTILS_PKGS += "cdbs patch python-setuptools python-pip python-nose"
CLOUD_CFG_FILE = '/etc/cloud/cloud.cfg'
GRID_SCHEDULER_GIT = 'git://github.com/jtriley/gridscheduler.git'
CLOUDERA_ARCHIVE_KEY = 'http://archive.cloudera.com/debian/archive.key'
CLOUDERA_APT = 'http://archive.cloudera.com/debian maverick-cdh3u5 contrib'
CONDOR_APT = 'http://www.cs.wisc.edu/condor/debian/development lenny contrib'
NUMPY_SCIPY_SITE_CFG = """\
[DEFAULT]
library_dirs = /usr/lib
include_dirs = /usr/include:/usr/include/suitesparse
[blas_opt]
libraries = ptf77blas, ptcblas, atlas
[lapack_opt]
libraries = lapack, ptf77blas, ptcblas, atlas
[amd]
amd_libs = amd
[umfpack]
umfpack_libs = umfpack
[fftw]
libraries = fftw3
"""
STARCLUSTER_MOTD = """\
#!/bin/sh
cat<<"EOF"
_ _ _
__/\_____| |_ __ _ _ __ ___| |_ _ ___| |_ ___ _ __
\ / __| __/ _` | '__/ __| | | | / __| __/ _ \ '__|
/_ _\__ \ || (_| | | | (__| | |_| \__ \ || __/ |
\/ |___/\__\__,_|_| \___|_|\__,_|___/\__\___|_|
StarCluster Ubuntu 12.04 AMI
Software Tools for Academics and Researchers (STAR)
Homepage: http://star.mit.edu/cluster
Documentation: http://star.mit.edu/cluster/docs/latest
Code: https://github.com/jtriley/StarCluster
Mailing list: [email protected]
This AMI Contains:
* Open Grid Scheduler (OGS - formerly SGE) queuing system
* Condor workload management system
* OpenMPI compiled with Open Grid Scheduler support
* OpenBLAS- Highly optimized Basic Linear Algebra Routines
* NumPy/SciPy linked against OpenBlas
* IPython 0.13 with parallel support
* and more! (use 'dpkg -l' to show all installed packages)
Open Grid Scheduler/Condor cheat sheet:
* qstat/condor_q - show status of batch jobs
* qhost/condor_status- show status of hosts, queues, and jobs
* qsub/condor_submit - submit batch jobs (e.g. qsub -cwd ./job.sh)
* qdel/condor_rm - delete batch jobs (e.g. qdel 7)
* qconf - configure Open Grid Scheduler system
Current System Stats:
EOF
landscape-sysinfo | grep -iv 'graph this data'
"""
CLOUD_INIT_CFG = """\
user: ubuntu
disable_root: 0
preserve_hostname: False
# datasource_list: [ "NoCloud", "OVF", "Ec2" ]
cloud_init_modules:
- bootcmd
- resizefs
- set_hostname
- update_hostname
- update_etc_hosts
- rsyslog
- ssh
cloud_config_modules:
- mounts
- ssh-import-id
- locale
- set-passwords
- grub-dpkg
- timezone
- puppet
- chef
- mcollective
- disable-ec2-metadata
- runcmd
cloud_final_modules:
- rightscale_userdata
- scripts-per-once
- scripts-per-boot
- scripts-per-instance
- scripts-user
- keys-to-console
- final-message
apt_sources:
- source: deb $MIRROR $RELEASE multiverse
- source: deb %(CLOUDERA_APT)s
- source: deb-src %(CLOUDERA_APT)s
- source: deb %(CONDOR_APT)s
""" % dict(CLOUDERA_APT=CLOUDERA_APT, CONDOR_APT=CONDOR_APT)
OPENBLAS_0_1ALPHA_2_PATCH = """\
diff --git a/Makefile.system b/Makefile.system
index f0487ac..84f41a7 100644
--- a/Makefile.system
+++ b/Makefile.system
@@ -27,7 +27,13 @@ HOSTCC = $(CC)
endif
ifdef TARGET
-GETARCH_FLAGS += -DFORCE_$(TARGET)
+GETARCH_FLAGS := -DFORCE_$(TARGET)
+endif
+
+#TARGET_CORE will override TARGET which is used in DYNAMIC_ARCH=1.
+#
+ifdef TARGET_CORE
+GETARCH_FLAGS := -DFORCE_$(TARGET_CORE)
endif
ifdef INTERFACE64
"""
def run_command(cmd, ignore_failure=False, failure_callback=None,
get_output=False):
kwargs = {}
if get_output:
kwargs.update(dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE))
p = subprocess.Popen(cmd, shell=True, **kwargs)
output = []
if get_output:
line = None
while line != '':
line = p.stdout.readline()
if line != '':
output.append(line)
print line,
for line in p.stderr.readlines():
if line != '':
output.append(line)
print line,
retval = p.wait()
if retval != 0:
errmsg = "command '%s' failed with status %d" % (cmd, retval)
if failure_callback:
ignore_failure = failure_callback(retval)
if not ignore_failure:
raise Exception(errmsg)
else:
sys.stderr.write(errmsg + '\n')
if get_output:
return retval, ''.join(output)
return retval
def apt_command(cmd):
dpkg_opts = "Dpkg::Options::='--force-confnew'"
cmd = "apt-get -o %s -y --force-yes %s" % (dpkg_opts, cmd)
cmd = "DEBIAN_FRONTEND='noninteractive' " + cmd
run_command(cmd)
def apt_install(pkgs):
apt_command('install %s' % pkgs)
def chdir(directory):
opts = glob.glob(directory)
isdirlist = [o for o in opts if os.path.isdir(o)]
if len(isdirlist) > 1:
raise Exception("more than one dir matches: %s" % directory)
os.chdir(isdirlist[0])
def _fix_atlas_rules(rules_file='debian/rules'):
for line in fileinput.input(rules_file, inplace=1):
if 'ATLAS=None' not in line:
print line,
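# Rewrite /etc/apt/sources.list: keep the existing deb lines, add matching
# deb-src lines, and append the Cloudera and Condor repositories and keys.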
def configure_apt_sources():
srcfile = open(APT_SOURCES_FILE)
contents = srcfile.readlines()
srcfile.close()
srclines = []
for line in contents:
if not line.strip() or line.startswith('#'):
continue
parts = line.split()
if parts[0] == 'deb':
parts[0] = 'deb-src'
srclines.append(' '.join(parts).strip())
srcfile = open(APT_SOURCES_FILE, 'w')
srcfile.write(''.join(contents))
srcfile.write('\n'.join(srclines) + '\n')
srcfile.write('deb %s\n' % CLOUDERA_APT)
srcfile.write('deb-src %s\n' % CLOUDERA_APT)
srcfile.write('deb %s\n' % CONDOR_APT)
srcfile.close()
run_command('gpg --keyserver keyserver.ubuntu.com --recv-keys 0F932C9C')
run_command('curl -s %s | sudo apt-key add -' % CLOUDERA_ARCHIVE_KEY)
apt_install('debian-archive-keyring')
def upgrade_packages():
apt_command('update')
apt_command('upgrade')
def install_build_utils():
    """Install the build tools and packaging utilities in BUILD_UTILS_PKGS."""
apt_install(BUILD_UTILS_PKGS)
def install_gridscheduler():
chdir(SRC_DIR)
apt_command('build-dep gridengine')
if os.path.isfile('gridscheduler-scbuild.tar.gz'):
run_command('tar xvzf gridscheduler-scbuild.tar.gz')
run_command('mv gridscheduler /opt/sge6-fresh')
return
run_command('git clone %s' % GRID_SCHEDULER_GIT)
sts, out = run_command('readlink -f `which java`', get_output=True)
java_home = out.strip().split('/jre')[0]
chdir(os.path.join(SRC_DIR, 'gridscheduler', 'source'))
run_command('git checkout -t -b develop origin/develop')
env = 'JAVA_HOME=%s' % java_home
run_command('%s ./aimk -only-depend' % env)
run_command('%s scripts/zerodepend' % env)
run_command('%s ./aimk depend' % env)
run_command('%s ./aimk -no-secure -no-gui-inst' % env)
sge_root = '/opt/sge6-fresh'
os.mkdir(sge_root)
env += ' SGE_ROOT=%s' % sge_root
run_command('%s scripts/distinst -all -local -noexit -y -- man' % env)
def install_condor():
chdir(SRC_DIR)
run_command("rm /var/lock")
apt_install('condor=7.7.2-1')
run_command('echo condor hold | dpkg --set-selections')
run_command('ln -s /etc/condor/condor_config /etc/condor_config.local')
run_command('mkdir /var/lib/condor/log')
run_command('mkdir /var/lib/condor/run')
run_command('chown -R condor:condor /var/lib/condor/log')
run_command('chown -R condor:condor /var/lib/condor/run')
def install_torque():
chdir(SRC_DIR)
apt_install('torque-server torque-mom torque-client')
def install_pydrmaa():
chdir(SRC_DIR)
run_command('pip install drmaa')
def install_atlas():
    """Build and install ATLAS debs from the Ubuntu source package."""
chdir(SRC_DIR)
apt_command('build-dep atlas')
if glob.glob("*atlas*.deb"):
run_command('dpkg -i *atlas*.deb')
return
apt_command('source atlas')
chdir('atlas-*')
run_command('fakeroot debian/rules custom')
run_command('dpkg -i ../*atlas*.deb')
def install_openblas():
    """Build and install patched OpenBLAS debs and put them on hold."""
chdir(SRC_DIR)
apt_command('build-dep libopenblas-dev')
if glob.glob("*openblas*.deb"):
run_command('dpkg -i *openblas*.deb')
else:
apt_command('source libopenblas-dev')
chdir('openblas-*')
patch = open('fix_makefile_system.patch', 'w')
patch.write(OPENBLAS_0_1ALPHA_2_PATCH)
patch.close()
run_command('patch -p1 < %s' % patch.name)
rule_file = open('Makefile.rule', 'a')
# NO_AFFINITY=1 is required to utilize all cores on all non
# cluster-compute/GPU instance types due to the shared virtualization
# layer not supporting processor affinity properly. However, Cluster
# Compute/GPU instance types use a near-bare-metal hypervisor which
# *does* support processor affinity. From minimal testing it appears
# that there is a ~20% increase in performance when using affinity on
# cc1/cg1 types implying NO_AFFINITY=1 should *not* be set for cluster
# compute/GPU AMIs.
lines = ['DYNAMIC_ARCH=1', 'NUM_THREADS=64', 'NO_LAPACK=1',
'NO_AFFINITY=1']
rule_file.write('\n'.join(lines))
rule_file.close()
run_command('fakeroot debian/rules custom')
run_command('dpkg -i ../*openblas*.deb')
run_command('echo libopenblas-base hold | dpkg --set-selections')
run_command('echo libopenblas-dev hold | dpkg --set-selections')
def install_numpy():
    """Rebuild python-numpy debs against the custom BLAS/LAPACK site.cfg."""
chdir(SRC_DIR)
apt_command('build-dep python-numpy')
if glob.glob('*numpy*.deb'):
run_command('dpkg -i *numpy*.deb')
return
apt_command('source python-numpy')
chdir('python-numpy*')
sitecfg = open('site.cfg', 'w')
sitecfg.write(NUMPY_SCIPY_SITE_CFG)
sitecfg.close()
_fix_atlas_rules()
def _deb_failure_callback(retval):
if not glob.glob('../*numpy*.deb'):
return False
return True
run_command('dpkg-buildpackage -rfakeroot -b',
failure_callback=_deb_failure_callback)
run_command('dpkg -i ../*numpy*.deb')
def install_scipy():
    """Rebuild python-scipy debs against the custom BLAS/LAPACK site.cfg."""
chdir(SRC_DIR)
apt_command('build-dep python-scipy')
if glob.glob('*scipy*.deb'):
run_command('dpkg -i *scipy*.deb')
return
apt_command('source python-scipy')
chdir('python-scipy*')
sitecfg = open('site.cfg', 'w')
sitecfg.write(NUMPY_SCIPY_SITE_CFG)
sitecfg.close()
_fix_atlas_rules()
def _deb_failure_callback(retval):
if not glob.glob('../*numpy*.deb'):
return False
return True
run_command('dpkg-buildpackage -rfakeroot -b',
failure_callback=_deb_failure_callback)
run_command('dpkg -i ../*scipy*.deb')
def install_pandas():
    """Install pandas via pip after pulling in its build dependencies."""
chdir(SRC_DIR)
apt_command('build-dep pandas')
run_command('pip install pandas')
def install_openmpi():
chdir(SRC_DIR)
apt_command('build-dep openmpi')
apt_install('blcr-util')
if glob.glob('*openmpi*.deb'):
run_command('dpkg -i *openmpi*.deb')
else:
apt_command('source openmpi')
chdir('openmpi*')
for line in fileinput.input('debian/rules', inplace=1):
print line,
if '--enable-heterogeneous' in line:
print ' --with-sge \\'
def _deb_failure_callback(retval):
if not glob.glob('../*openmpi*.deb'):
return False
return True
run_command('dch --local=\'+custom\' '
'"custom build on: `uname -s -r -v -m -p -i -o`"')
run_command('dpkg-buildpackage -rfakeroot -b',
failure_callback=_deb_failure_callback)
run_command('dpkg -i ../*openmpi*.deb')
sts, out = run_command('ompi_info | grep -i grid', get_output=True)
if 'gridengine' not in out:
raise Exception("failed to build OpenMPI with "
"Open Grid Scheduler support")
run_command('echo libopenmpi1.3 hold | dpkg --set-selections')
run_command('echo libopenmpi-dev hold | dpkg --set-selections')
run_command('echo libopenmpi-dbg hold | dpkg --set-selections')
run_command('echo openmpi-bin hold | dpkg --set-selections')
run_command('echo openmpi-checkpoint hold | dpkg --set-selections')
run_command('echo openmpi-common hold | dpkg --set-selections')
run_command('echo openmpi-doc hold | dpkg --set-selections')
def install_hadoop():
chdir(SRC_DIR)
hadoop_pkgs = ['namenode', 'datanode', 'tasktracker', 'jobtracker',
'secondarynamenode']
pkgs = ['hadoop-0.20'] + ['hadoop-0.20-%s' % pkg for pkg in hadoop_pkgs]
apt_install(' '.join(pkgs))
run_command('easy_install dumbo')
def install_ipython():
chdir(SRC_DIR)
apt_install('libzmq-dev')
run_command('pip install ipython tornado pygments pyzmq')
mjax_install = 'from IPython.external.mathjax import install_mathjax'
mjax_install += '; install_mathjax()'
run_command("python -c '%s'" % mjax_install)
def configure_motd():
for f in glob.glob('/etc/update-motd.d/*'):
os.unlink(f)
motd = open('/etc/update-motd.d/00-starcluster', 'w')
motd.write(STARCLUSTER_MOTD)
motd.close()
os.chmod(motd.name, 0755)
def configure_cloud_init():
    """Write the cloud-init configuration to /etc/cloud/cloud.cfg."""
cloudcfg = open('/etc/cloud/cloud.cfg', 'w')
cloudcfg.write(CLOUD_INIT_CFG)
cloudcfg.close()
def configure_bash():
completion_line_found = False
for line in fileinput.input('/etc/bash.bashrc', inplace=1):
if 'bash_completion' in line and line.startswith('#'):
print line.replace('#', ''),
completion_line_found = True
elif completion_line_found:
print line.replace('#', ''),
completion_line_found = False
else:
print line,
aliasfile = open('/root/.bash_aliases', 'w')
aliasfile.write("alias ..='cd ..'\n")
aliasfile.close()
def setup_environ():
num_cpus = multiprocessing.cpu_count()
os.environ['MAKEFLAGS'] = '-j%d' % (num_cpus + 1)
os.environ['DEBIAN_FRONTEND'] = "noninteractive"
if os.path.isfile('/sbin/initctl') and not os.path.islink('/sbin/initctl'):
run_command('mv /sbin/initctl /sbin/initctl.bak')
run_command('ln -s /bin/true /sbin/initctl')
def install_nfs():
chdir(SRC_DIR)
run_command('initctl reload-configuration')
apt_install('nfs-kernel-server')
run_command('ln -s /etc/init.d/nfs-kernel-server /etc/init.d/nfs')
def install_default_packages():
    # stop mysql from interactively asking for a password
preseedf = '/tmp/mysql-preseed.txt'
mysqlpreseed = open(preseedf, 'w')
preseeds = """\
mysql-server mysql-server/root_password select
mysql-server mysql-server/root_password seen true
mysql-server mysql-server/root_password_again select
mysql-server mysql-server/root_password_again seen true
"""
mysqlpreseed.write(preseeds)
mysqlpreseed.close()
run_command('debconf-set-selections < %s' % mysqlpreseed.name)
run_command('rm %s' % mysqlpreseed.name)
pkgs = "python-dev git vim mercurial subversion cvs encfs "
pkgs += "openmpi-bin libopenmpi-dev python-django "
pkgs += "keychain screen tmux zsh ksh csh tcsh python-mpi4py "
pkgs += "python-virtualenv python-imaging python-boto python-matplotlib "
pkgs += "unzip rar unace build-essential gfortran ec2-api-tools "
pkgs += "ec2-ami-tools mysql-server mysql-client apache2 liblapack-dev "
pkgs += "libapache2-mod-wsgi sysv-rc-conf pssh emacs cython irssi htop "
pkgs += "python-distutils-extra vim-scripts python-ctypes python-pudb "
pkgs += "mosh python-scipy python-numpy default-jdk mpich2 xvfb"
apt_install(pkgs)
def configure_init():
for script in ['nfs-kernel-server', 'hadoop', 'condor', 'apache', 'mysql']:
run_command('find /etc/rc* -iname \*%s\* -delete' % script)
def cleanup():
run_command('rm -f /etc/resolv.conf')
run_command('rm -rf /var/run/resolvconf')
run_command('rm -f /etc/mtab')
run_command('rm -rf /root/*')
exclude = ['/root/.bashrc', '/root/.profile', '/root/.bash_aliases']
for dot in glob.glob("/root/.*"):
if dot not in exclude:
run_command('rm -rf %s' % dot)
for path in glob.glob('/usr/local/src/*'):
if os.path.isdir(path):
shutil.rmtree(path)
run_command('rm -f /var/cache/apt/archives/*.deb')
run_command('rm -f /var/cache/apt/archives/partial/*')
    for f in glob.glob('/etc/profile.d/*'):
if 'byobu' in f:
run_command('rm -f %s' % f)
if os.path.islink('/sbin/initctl') and os.path.isfile('/sbin/initctl.bak'):
run_command('mv -f /sbin/initctl.bak /sbin/initctl')
def main():
    """Configure the image, install all packages, and clean up afterwards."""
if os.getuid() != 0:
sys.stderr.write('you must be root to run this script\n')
return
setup_environ()
configure_motd()
configure_cloud_init()
configure_bash()
configure_apt_sources()
upgrade_packages()
install_build_utils()
install_default_packages()
install_gridscheduler()
install_condor()
#install_torque()
install_pydrmaa()
# Replace ATLAS with OpenBLAS
# install_atlas()
install_openblas()
# Custom NumPy/SciPy install is no longer needed in 12.04
# install_numpy()
# install_scipy()
install_pandas()
install_ipython()
install_openmpi()
install_hadoop()
install_nfs()
configure_init()
cleanup()
if __name__ == '__main__':
main()
|
gpl-3.0
|
Kait-tt/tacowasa
|
addons/stats/scripts/test_interval_predict.py
|
1
|
9286
|
# coding:utf-8
import json
from statistics import mean, stdev, median
import argparse
import matplotlib.pyplot as plt
import predictors
import math
method = predictors.AveragePredictorEachCostInterval
project_names = ['tacowasa']
COLORS = ['green', 'blue', 'red']
def main():
parser = argparse.ArgumentParser(description='Test interval prediction')
parser.add_argument('-s', '--src', required=True)
args = parser.parse_args()
projects = json.load(open(args.src, 'r'))
# filter project
# if len(project_names):
# projects = [x for x in projects if x['projectName'] in project_names]
results = calc_all(projects, method)
# filter if same user and same cost tasks were over 3
# for project in projects:
# result = [x for x in results if x['projectName'] == project['projectName']][0]
# tasks = []
# predicates = []
# actuals = []
# memo = {}
# for i in range(len(project['tasks'])):
# task = project['tasks'][i]
# key = '{}_{}'.format(task['cost'], task['userId'])
# if key not in memo:
# memo[key] = 0
# if memo[key] <= 3:
# tasks.append(task)
# predicates.append(result['predicates'][i])
# actuals.append(result['actuals'][i])
# memo[key] += 1
#
# project['tasks'] = tasks
# result['predicates'] = predicates
# result['actuals'] = actuals
print_results_table(projects, results)
# plot_hist(projects, results)
# plot_timeline(projects, results)
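# Run the predictor incrementally over each project's tasks, collecting the
# predicted interval tuples alongside the actual work times.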
def calc_all(projects, method):
results = []
for project in projects:
print('load : {}'.format(project['projectName']))
tasks = []
predicates = []
actuals = []
for task in project['tasks']:
predicate = method.predicate(tasks, task['userId'], task['cost'])
if predicate[0] is None:
predicates.append((0, 0, 0, 0, 0))
else:
predicates.append(predicate)
actuals.append(task['actualWorkTime'])
tasks.append(task)
results.append({'projectName': project['projectName'], 'predicates': predicates, 'actuals': actuals})
return results
def uniq_users(project):
return list(set([x['userId'] for x in project['tasks']]))
def uniq_all_users(projects):
users = []
for project in projects:
users.extend(uniq_users(project))
return list(set(users))
def uniq_all_cost(projects):
cost = []
for project in projects:
for task in project['tasks']:
cost.append(task['cost'])
return list(set(cost))
def plot_hist(projects, results):
ucost = uniq_all_cost(projects)
es = {}
for cost in ucost:
es[cost] = []
for project in projects:
tasks = project['tasks']
result = [x for x in results if x['projectName'] == project['projectName']][0]
predicates = result['predicates']
actuals = result['actuals']
for i in range(len(tasks)):
e = min(abs(predicates[i][0] - actuals[i]) / actuals[i], 10)
y = predicates[i]
x = actuals[i]
# e = 0 if y[1] <= x <= y[2] else min(10, abs(y[1] - x) / x, abs(y[2] - x) / x)
# e = 0 if y[3] <= x <= y[4] else min(10, abs(y[3] - x) / x, abs(y[4] - x) / x)
es[tasks[i]['cost']].append(e)
    plt.hist(list(es.values()), normed=True)
plt.show()
def plot_timeline(projects, results):
users = uniq_all_users(projects)
costs = uniq_all_cost(projects)
n = sum([len(uniq_users(x)) for x in projects])
m = math.floor(math.sqrt(n))
r = math.floor(n / m)
c = math.floor((n + r - 1) / r)
idx = 0
for pi, project in enumerate(projects):
project_name = project['projectName']
tasks = project['tasks']
result = [x for x in results if x['projectName'] == project_name][0]
predicates = result['predicates']
actuals = result['actuals']
for ui, user in enumerate(users):
ymax, xmax = 0, 0
if len([i for i in range(len(tasks)) if tasks[i]['userId'] == user]) > 0:
idx += 1
for ci, cost in enumerate(costs):
idxes = [i for i in range(len(tasks)) if tasks[i]['userId'] == user and tasks[i]['cost'] == cost]
if len(idxes) == 0:
continue
xs = [predicates[i] for i in idxes]
means = [x[0] for x in xs]
mlows = [x[0] - x[1] for x in xs]
mhighs = [x[2] - x[0] for x in xs]
lows = [x[0] - x[3] for x in xs]
highs = [x[4] - x[0] for x in xs]
xs2 = [actuals[i] for i in idxes]
# es = [min(abs(predicates[i][0] - actuals[i]) / actuals[i], 10) for i in idxes]
# es = [
# 0 if xs[i][3] <= xs2[i] <= xs[i][4]
# else min(10, abs(xs[i][3] - xs2[i]) / xs2[i], abs(xs[i][4] - xs2[i]) / xs2[i])
# for i in range(len(idxes))
# ]
xmax = max(xmax, len(xs))
plt.subplot(r, c, idx)
plt.xlim(-1, xmax + 1)
# plt.ylim(0, 11)
plt.title('{} {}'.format(project_name, user), fontsize=10)
# plt.plot(xs2, color=COLORS[ci])
plt.scatter(range(len(xs2)), xs2, marker='o', color=COLORS[ci], s=10,
label='Actual_Cost{}'.format(cost))
plt.errorbar(range(len(means)), means, yerr=[mlows, mhighs], color=COLORS[ci],
elinewidth=2, label='Predict_Cost{}'.format(cost))
# plt.errorbar(range(len(means)), means, yerr=[lows, highs], color=COLORS[ci])
# plt.scatter(range(len(means)), xs2, marker='o', color=COLORS[ci], s=10)
plt.legend()
plt.show()
def print_results_table(projects, results):
data = []
total_es = []
total_ins = []
total_ws = []
cost_es = {1: [], 3: [], 5: []}
cost_ins = {1: [], 3: [], 5: []}
cost_ws = {1: [], 3: [], 5: []}
for project in projects:
project_name = project['projectName']
tasks = project['tasks']
result = [x for x in results if x['projectName'] == project_name][0]
predicates = result['predicates']
actuals = result['actuals']
project_es = []
project_ins = []
project_ws = []
for user in uniq_all_users(projects):
for cost in uniq_all_cost(projects):
idxes = [i for i in range(len(tasks)) if tasks[i]['userId'] == user and tasks[i]['cost'] == cost]
if len(idxes) < 2:
continue
es = [abs(predicates[i][0] - actuals[i]) / actuals[i] for i in idxes]
ins = [predicates[i][1] <= actuals[i] <= predicates[i][2] for i in idxes]
ws = [float(predicates[i][2] - predicates[i][1]) for i in idxes]
project_es.extend(es)
project_ins.extend(ins)
project_ws.extend(ws)
cost_es[cost].extend(es)
cost_ins[cost].extend(ins)
cost_ws[cost].extend(ws)
total_es.extend(es)
total_ins.extend(ins)
total_ws.extend(ws)
data.append({
'name': '{} ({}, {})'.format(project_name, user, cost),
'es': es,
'ins': ins,
'ws': ws
})
data.append({
'name': '{}'.format(project_name),
'es': project_es,
'ins': project_ins,
'ws': project_ws
})
for cost in [1, 3, 5]:
data.append({
'name': 'Cost {}'.format(cost),
'es': cost_es[cost],
'ins': cost_ins[cost],
'ws': cost_ws[cost]
})
data.append({
'name': 'Total',
'es': total_es,
'ins': total_ins,
'ws': total_ws
})
scores = []
for x in data:
es = x['es']
ins = x['ins']
ws = x['ws']
scores.append({
'name': x['name'],
'sum': sum(es),
'mean': mean(es) if len(es) > 0 else 0.0,
'stddev': stdev(es) if len(es) >= 2 else 0.0,
'median': median(es) if len(es) > 0 else 0.0,
'cover': ins.count(True),
'uncover': ins.count(False),
'coverp': float(ins.count(True)) / len(ins) if len(ins) > 0 else 0.0,
'wmean': mean(ws) if len(ws) > 0 else 0.0
})
print('{:25} | {:8} | {:7} | {:7} | {:7} | {:5} | {:7} | {:7} | {:7}'
.format('Name', 'Sum', 'Mean', 'Median', 'Stddev', 'Cover', 'Uncover', 'CoverP', 'WMean'))
for idx, score in enumerate(scores):
print('{:25} | {:8.3f} | {:7.3f} | {:7.3f} | {:7.3f} | {:5} | {:7} | {:7.3} | {:7.4}'.
format(score['name'], score['sum'], score['mean'], score['median'], score['stddev'],
score['cover'], score['uncover'], score['coverp'], score['wmean']))
if __name__ == '__main__':
main()
|
mit
|
miklos1/fayette
|
generate.py
|
1
|
3091
|
import os
import numpy
import pandas
assembly = pandas.read_csv("assembly.csv")
assembly["rate"] = assembly.num_dofs / assembly.parloop_time
matvec = pandas.read_csv("matvec.csv")
matvec["rate"] = matvec.num_dofs / matvec.matvec_time
matfree = pandas.read_csv("matfree.csv")
matfree["rate"] = matfree.num_dofs / matfree.matmult_time
print('Files read.')
outdir = "data"
os.makedirs(outdir, exist_ok=True)
mutate = {"poisson": "poisson",
"hyperelasticity": "hyperelastic",
"curl_curl": "curlcurl",
"stokes_momentum": "stokes_momentum"}
def curve(dataset, problem, config, exp, prefix=""):
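    # Average the measured DOF throughput per degree, dump it to CSV, and fit
    # rate ~ C * p**3 / (p + 1)**exp, returning C and the covered degree range.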
name = problem
if config == "base":
mode = "coffee"
elif config == "spectral":
mode = "spectral"
elif config == "underintegration":
problem = {"poisson": "poisson_gll"}[problem]
mode = "spectral"
elif config == "spmv":
pass
else:
assert False, "Unexpected configuration!"
filtered = dataset.loc[lambda r: r.problem == problem]
if config != "spmv":
filtered = filtered.loc[lambda r: r.tsfc_mode == mode]
num_procs, = set(filtered["num_procs"])
series = filtered.groupby(["degree"]).mean()["rate"]
series.to_csv("%s/%s%s_%s.csv" % (outdir, prefix, mutate[name], config), header=True)
array = numpy.array(list(series.to_dict().items()))
x = array[:, 0]
y = array[:, 1]
logC = numpy.log(y) - numpy.log(x**3 / (x+1)**exp)
rho = logC.std() / logC.mean()
if rho > 0.1:
print(problem, config, 'rho =', rho)
C = numpy.exp(logC.mean())
return C, int(numpy.floor(x.min())), int(numpy.ceil(x.max()))
def linear(problem):
with open("%s/%s.dat" % (outdir, mutate[problem]), 'w') as f:
print('C a b', file=f)
C, a, b = curve(matvec, problem, "spmv", 6)
# print(C, a, b, file=f)
C, a, b = curve(matfree, problem, "base", 6)
print(C, a, b, file=f)
C, a, b = curve(matfree, problem, "spectral", 4)
print(C, a, b, file=f)
def bilinear(problem):
with open("%s/bi%s.dat" % (outdir, mutate[problem]), 'w') as f:
print('C a b', file=f)
C, a, b = curve(assembly, problem, "base", 9, prefix="bi")
print(C, a, b, file=f)
C, a, b = curve(assembly, problem, "spectral", 7, prefix="bi")
print(C, a, b, file=f)
def bilinear_poisson():
with open("%s/bipoisson.dat" % (outdir,), 'w') as f:
print('C a b', file=f)
C, a, b = curve(assembly, "poisson", "base", 9, prefix="bi")
print(C, a, b, file=f)
C, a, b = curve(assembly, "poisson", "spectral", 7, prefix="bi")
print(C, a, b, file=f)
C, a, b = curve(assembly, "poisson", "underintegration", 6, prefix="bi")
print(C, a, b, file=f)
def bilinear_stokes_momentum():
curve(assembly, "stokes_momentum", "base", 9)
curve(assembly, "stokes_momentum", "spectral", 9)
bilinear_stokes_momentum()
bilinear_poisson()
bilinear("hyperelasticity")
bilinear("curl_curl")
linear("poisson")
linear("hyperelasticity")
linear("curl_curl")
|
mit
|
deepfield/ibis
|
ibis/pandas/execution/join.py
|
1
|
3316
|
import operator
import pandas as pd
import ibis.expr.operations as ops
from ibis.pandas.dispatch import execute_node
from ibis.pandas.core import execute
from ibis.pandas.execution import constants
def _compute_join_column(column_expr, **kwargs):
column_op = column_expr.op()
if isinstance(column_op, ops.TableColumn):
new_column = column_op.name
else:
new_column = execute(column_expr, **kwargs)
root_table, = column_op.root_tables()
return new_column, root_table
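# Materialized joins: each equality predicate is resolved to a column name (or
# a computed column) on the left/right table, then combined with pd.merge.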
@execute_node.register(ops.Join, pd.DataFrame, pd.DataFrame)
def execute_materialized_join(op, left, right, **kwargs):
op_type = type(op)
try:
how = constants.JOIN_TYPES[op_type]
except KeyError:
raise NotImplementedError('{} not supported'.format(op_type.__name__))
left_op = op.left.op()
right_op = op.right.op()
on = {left_op: [], right_op: []}
for predicate in map(operator.methodcaller('op'), op.predicates):
if not isinstance(predicate, ops.Equals):
raise TypeError(
'Only equality join predicates supported with pandas'
)
new_left_column, left_pred_root = _compute_join_column(
predicate.left,
**kwargs
)
on[left_pred_root].append(new_left_column)
new_right_column, right_pred_root = _compute_join_column(
predicate.right,
**kwargs
)
on[right_pred_root].append(new_right_column)
df = pd.merge(
left, right,
how=how,
left_on=on[left_op],
right_on=on[right_op],
suffixes=constants.JOIN_SUFFIXES,
)
return df
@execute_node.register(
ops.AsOfJoin, pd.DataFrame, pd.DataFrame, (pd.Timedelta, type(None))
)
def execute_asof_join(op, left, right, tolerance, **kwargs):
overlapping_columns = frozenset(left.columns) & frozenset(right.columns)
left_on, right_on = _extract_predicate_names(op.predicates)
left_by, right_by = _extract_predicate_names(op.by)
_validate_columns(
overlapping_columns, left_on, right_on, left_by, right_by)
return pd.merge_asof(
left=left,
right=right,
left_on=left_on,
right_on=right_on,
left_by=left_by or None,
right_by=right_by or None,
tolerance=tolerance,
)
def _extract_predicate_names(predicates):
lefts = []
rights = []
for predicate in map(operator.methodcaller('op'), predicates):
if not isinstance(predicate, ops.Equals):
raise TypeError(
'Only equality join predicates supported with pandas'
)
left_name = predicate.left._name
right_name = predicate.right._name
lefts.append(left_name)
rights.append(right_name)
return lefts, rights
def _validate_columns(orig_columns, *key_lists):
overlapping_columns = orig_columns.difference(
item for sublist in key_lists for item in sublist
)
if overlapping_columns:
raise ValueError(
'left and right DataFrame columns overlap on {} in a join. '
'Please specify the columns you want to select from the join, '
'e.g., join[left.column1, right.column2, ...]'.format(
overlapping_columns
)
)
|
apache-2.0
|
ahoyosid/scikit-learn
|
sklearn/tests/test_pipeline.py
|
10
|
14095
|
"""
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
|
bsd-3-clause
|
fzalkow/scikit-learn
|
benchmarks/bench_plot_incremental_pca.py
|
374
|
6430
|
"""
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
|
bsd-3-clause
|
scottpurdy/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/scale.py
|
69
|
13414
|
import textwrap
import numpy as np
from numpy import ma
MaskedArray = ma.MaskedArray
from cbook import dedent
from ticker import NullFormatter, ScalarFormatter, LogFormatterMathtext, Formatter
from ticker import NullLocator, LogLocator, AutoLocator, SymmetricalLogLocator, FixedLocator
from transforms import Transform, IdentityTransform
class ScaleBase(object):
"""
The base class for all scales.
Scales are separable transformations, working on a single dimension.
Any subclasses will want to override:
- :attr:`name`
- :meth:`get_transform`
And optionally:
- :meth:`set_default_locators_and_formatters`
- :meth:`limit_range_for_scale`
"""
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` object
associated with this scale.
"""
raise NotImplementedError
def set_default_locators_and_formatters(self, axis):
"""
Set the :class:`~matplotlib.ticker.Locator` and
:class:`~matplotlib.ticker.Formatter` objects on the given
axis to match this scale.
"""
raise NotImplementedError
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Returns the range *vmin*, *vmax*, possibly limited to the
domain supported by this scale.
*minpos* should be the minimum positive value in the data.
This is used by log scales to determine a minimum value.
"""
return vmin, vmax
class LinearScale(ScaleBase):
"""
The default linear scale.
"""
name = 'linear'
def __init__(self, axis, **kwargs):
pass
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to reasonable defaults for
linear scaling.
"""
axis.set_major_locator(AutoLocator())
axis.set_major_formatter(ScalarFormatter())
axis.set_minor_locator(NullLocator())
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
The transform for linear scaling is just the
:class:`~matplotlib.transforms.IdentityTransform`.
"""
return IdentityTransform()
def _mask_non_positives(a):
"""
Return a Numpy masked array where all non-positive values are
masked. If there are no non-positive values, the original array
is returned.
"""
mask = a <= 0.0
if mask.any():
return ma.MaskedArray(a, mask=mask)
return a
class LogScale(ScaleBase):
"""
A standard logarithmic scale. Care is taken so non-positive
values are not plotted.
For computational efficiency (to push as much as possible to Numpy
C code in the common cases), this scale provides different
transforms depending on the base of the logarithm:
- base 10 (:class:`Log10Transform`)
- base 2 (:class:`Log2Transform`)
- base e (:class:`NaturalLogTransform`)
- arbitrary base (:class:`LogTransform`)
"""
name = 'log'
class Log10Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 10.0
def transform(self, a):
a = _mask_non_positives(a * 10.0)
if isinstance(a, MaskedArray):
return ma.log10(a)
return np.log10(a)
def inverted(self):
return LogScale.InvertedLog10Transform()
class InvertedLog10Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 10.0
def transform(self, a):
return ma.power(10.0, a) / 10.0
def inverted(self):
return LogScale.Log10Transform()
class Log2Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 2.0
def transform(self, a):
a = _mask_non_positives(a * 2.0)
if isinstance(a, MaskedArray):
return ma.log(a) / np.log(2)
return np.log2(a)
def inverted(self):
return LogScale.InvertedLog2Transform()
class InvertedLog2Transform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = 2.0
def transform(self, a):
return ma.power(2.0, a) / 2.0
def inverted(self):
return LogScale.Log2Transform()
class NaturalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = np.e
def transform(self, a):
a = _mask_non_positives(a * np.e)
if isinstance(a, MaskedArray):
return ma.log(a)
return np.log(a)
def inverted(self):
return LogScale.InvertedNaturalLogTransform()
class InvertedNaturalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
base = np.e
def transform(self, a):
return ma.power(np.e, a) / np.e
def inverted(self):
return LogScale.NaturalLogTransform()
class LogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base):
Transform.__init__(self)
self.base = base
def transform(self, a):
a = _mask_non_positives(a * self.base)
if isinstance(a, MaskedArray):
return ma.log(a) / np.log(self.base)
return np.log(a) / np.log(self.base)
def inverted(self):
return LogScale.InvertedLogTransform(self.base)
class InvertedLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base):
Transform.__init__(self)
self.base = base
def transform(self, a):
return ma.power(self.base, a) / self.base
def inverted(self):
return LogScale.LogTransform(self.base)
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]``
will place 10 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
subs = kwargs.pop('subsx', None)
else:
base = kwargs.pop('basey', 10.0)
subs = kwargs.pop('subsy', None)
if base == 10.0:
self._transform = self.Log10Transform()
elif base == 2.0:
self._transform = self.Log2Transform()
elif base == np.e:
self._transform = self.NaturalLogTransform()
else:
self._transform = self.LogTransform(base)
self.base = base
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
log scaling.
"""
axis.set_major_locator(LogLocator(self.base))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(LogLocator(self.base, self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`~matplotlib.transforms.Transform` instance
appropriate for the given logarithm base.
"""
return self._transform
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Limit the domain to positive values.
"""
return (vmin <= 0.0 and minpos or vmin,
vmax <= 0.0 and minpos or vmax)
class SymmetricalLogScale(ScaleBase):
"""
The symmetrical logarithmic scale is logarithmic in both the
positive and negative directions from the origin.
Since the values close to zero tend toward infinity, there is a
need to have a range around zero that is linear. The parameter
*linthresh* allows the user to specify the size of this range
(-*linthresh*, *linthresh*).
"""
name = 'symlog'
class SymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base, linthresh):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self._log_base = np.log(base)
self._linadjust = (np.log(linthresh) / self._log_base) / linthresh
def transform(self, a):
a = np.asarray(a)
sign = np.sign(a)
masked = ma.masked_inside(a, -self.linthresh, self.linthresh, copy=False)
log = sign * ma.log(np.abs(masked)) / self._log_base
if masked.mask.any():
return np.asarray(ma.where(masked.mask,
a * self._linadjust,
log))
else:
return np.asarray(log)
def inverted(self):
return SymmetricalLogScale.InvertedSymmetricalLogTransform(self.base, self.linthresh)
class InvertedSymmetricalLogTransform(Transform):
input_dims = 1
output_dims = 1
is_separable = True
def __init__(self, base, linthresh):
Transform.__init__(self)
self.base = base
self.linthresh = linthresh
self._log_base = np.log(base)
self._log_linthresh = np.log(linthresh) / self._log_base
self._linadjust = linthresh / (np.log(linthresh) / self._log_base)
def transform(self, a):
a = np.asarray(a)
return np.where(a <= self._log_linthresh,
np.where(a >= -self._log_linthresh,
a * self._linadjust,
-(np.power(self.base, -a))),
np.power(self.base, a))
def inverted(self):
return SymmetricalLogScale.SymmetricalLogTransform(self.base)
def __init__(self, axis, **kwargs):
"""
*basex*/*basey*:
The base of the logarithm
*linthreshx*/*linthreshy*:
The range (-*x*, *x*) within which the plot is linear (to
avoid having the plot go to infinity around zero).
*subsx*/*subsy*:
Where to place the subticks between each major tick.
Should be a sequence of integers. For example, in a log10
scale: ``[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]``
will place 10 logarithmically spaced minor ticks between
each major tick.
"""
if axis.axis_name == 'x':
base = kwargs.pop('basex', 10.0)
linthresh = kwargs.pop('linthreshx', 2.0)
subs = kwargs.pop('subsx', None)
else:
base = kwargs.pop('basey', 10.0)
linthresh = kwargs.pop('linthreshy', 2.0)
subs = kwargs.pop('subsy', None)
self._transform = self.SymmetricalLogTransform(base, linthresh)
self.base = base
self.linthresh = linthresh
self.subs = subs
def set_default_locators_and_formatters(self, axis):
"""
Set the locators and formatters to specialized versions for
symmetrical log scaling.
"""
axis.set_major_locator(SymmetricalLogLocator(self.get_transform()))
axis.set_major_formatter(LogFormatterMathtext(self.base))
axis.set_minor_locator(SymmetricalLogLocator(self.get_transform(), self.subs))
axis.set_minor_formatter(NullFormatter())
def get_transform(self):
"""
Return a :class:`SymmetricalLogTransform` instance.
"""
return self._transform
_scale_mapping = {
'linear' : LinearScale,
'log' : LogScale,
'symlog' : SymmetricalLogScale
}
def get_scale_names():
names = _scale_mapping.keys()
names.sort()
return names
def scale_factory(scale, axis, **kwargs):
"""
Return a scale class by name.
ACCEPTS: [ %(names)s ]
"""
    if scale is None:
        scale = 'linear'
    scale = scale.lower()
if scale not in _scale_mapping:
raise ValueError("Unknown scale type '%s'" % scale)
return _scale_mapping[scale](axis, **kwargs)
scale_factory.__doc__ = dedent(scale_factory.__doc__) % \
{'names': " | ".join(get_scale_names())}
def register_scale(scale_class):
"""
Register a new kind of scale.
*scale_class* must be a subclass of :class:`ScaleBase`.
"""
_scale_mapping[scale_class.name] = scale_class
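# Illustrative sketch (not part of the original module): how a custom scale
# might be defined and registered through register_scale(). The class name
# 'ExampleScale' and the scale name 'example' are hypothetical; a real scale
# would normally also override get_transform() with its own Transform.
def _example_register_custom_scale():
    class ExampleScale(LinearScale):
        """A hypothetical scale that simply reuses the linear machinery."""
        name = 'example'
    register_scale(ExampleScale)
    # The new name now appears alongside the built-ins:
    return get_scale_names()  # ['example', 'linear', 'log', 'symlog']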
def get_scale_docs():
"""
Helper function for generating docstrings related to scales.
"""
docs = []
for name in get_scale_names():
scale_class = _scale_mapping[name]
docs.append(" '%s'" % name)
docs.append("")
class_docs = dedent(scale_class.__init__.__doc__)
class_docs = "".join([" %s\n" %
x for x in class_docs.split("\n")])
docs.append(class_docs)
docs.append("")
return "\n".join(docs)
|
agpl-3.0
|
zhongyuanzhou/FCH808.github.io
|
Data Visualization/Project/wrangle/wrangle.py
|
2
|
12108
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
"""
import pandas as pd
from latlon import *
import re
from bs4 import BeautifulSoup
import csv
import sys
import requests
def scrape_birthdays_page(url=None, csv_name=None):
""" Scrape info from nobelprize.org birthdays page
Scrapes info from the birthdays page at:
http://www.nobelprize.org/nobel_prizes/lists/birthdays.html?day=0&month=0&year=&photo=1#birthday_result
Also scrapes each bio_page of each winner for more info.
Writes to csv: thumbnail pic URL,
bio url link,
name
Year Prize Won
Nobel Prize field
Year Born
Year Died
Name again (sync check)
Born City
Died City (if applicable)
Affiliation at time of award
Args:
url: HTML url to nobelprize.org birthdays page
csv_out_name: String with name of csv file name to write to
Returns:
Write csv file to name specified in csv_out_name
"""
r = requests.get(url)
soup = BeautifulSoup(r.text, from_encoding=r.encoding)
each_entry_divs = soup.find_all("div", attrs={"class":"row", "style": "margin-bottom: 15px;"})
each_entry_divs.pop(0)
f = csv.writer(open(csv_name, "wb"))
f.writerow(["name", "bio_thumbnail", "bio_link", "year_won",
"nobel_field", "year_born", "year_died", "name_check",
"born_city", "died_city", "location_at_award"])
for person in each_entry_divs:
bio_thumbnail = person.find("img")['src']
bio_link = person.find(class_='text_link')['href']
nobel_info = person.find_all(class_="medium-10 columns birthdays-result-main")[0].text.split('\n')
year_won = nobel_info[0].split(",")[0]
nobel_field = nobel_info[0].split(",")[1]
# Get rid of extra spaces between some words.
        ## TODO: uncomment later to redo all scrapes.
nobel_field = " ".join([x.strip() for x in nobel_field.split()])
name = nobel_info[1]
year_born = nobel_info[2].split(":")[1]
try:
year_died = nobel_info[3].split(":")[1]
except IndexError as e:
year_died = ""
bio_link_full = "http://www.nobelprize.org/" + bio_link
name_check, born_city, died_city, affiliation = scrape_bio_page(bio_link_full)
f.writerow([name, bio_thumbnail, bio_link, year_won,
nobel_field, year_born, year_died, name_check,
born_city, died_city, affiliation])
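# Illustrative usage sketch (not part of the original script). The URL is the
# birthdays page quoted in the docstring above; the output file name
# "birthdays.csv" is hypothetical. Calling this performs live HTTP requests
# against nobelprize.org (one per laureate bio page), so it is slow.
def _example_scrape_birthdays():
    url = ("http://www.nobelprize.org/nobel_prizes/lists/birthdays.html"
           "?day=0&month=0&year=&photo=1#birthday_result")
    scrape_birthdays_page(url=url, csv_name="birthdays.csv")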
def scrape_bio_page(url=None):
'''Scrape Novel prize winner bio page for info.
Scrapes info from nobelprize.org bio-pages.
Info includes: name,
born_location,
died_location,
affiliation at time of award/ country of residence
Args:
url: Nobelprize.org Bio page to scrape.
Returns:
Four string (may be empty if not present): name, born_location,
died_location, institution
'''
r = requests.get(url)
soup = BeautifulSoup(r.text, from_encoding=r.encoding)
name = soup.find_all(attrs={'itemprop': 'Name'})[0].text
# Find the birthdate node, get its parent, then get the last string in the
# contents which has the city.
born_city = soup.find_all(attrs={'itemprop': 'birthDate'})[0].parent.contents[-1]
try:
death_city = soup.find_all(attrs={'itemprop': 'deathDate'})[0].parent.contents[-1]
except IndexError as e:
death_city = ""
affiliation = "None"
try:
# Peace/Literature Prizes generally have residences at time of award
# but no institution.
residence = soup.find_all('strong', text='Residence at the time of the award:')[0].parent.contents[-1]
affiliation = "None, " + residence
except IndexError as e:
pass
try:
# Overwrite None or Country of Residence with city affiliation if avail.
affiliation = soup.find_all(attrs={'itemprop': 'affiliation'})[0].contents[-1]
except IndexError as e:
pass
return name, born_city, death_city, affiliation
#def find_country_birth(bs4_html):
# all_names = [["name","birth_country_old_name",
# "birth_country_current_name",
# "year","field"]]
# place_acq = ""
# for i in bs4_html:
# # Only place acquired entries have an 'h3' sub-class
# if i.find_all('h3'):
# place_acq = i.h3.text
# # Only field_year/name entries have an 'h6' sub-class.
# if i.find_all('h6'):
# field_year = i.a.text
# name = i.h6.text
# year, field = grab_field_and_number(field_year)
# old_country_name, new_country_name = separate_old_country_names(place_acq)
#
# all_names.append([name.encode('utf-8').strip(),
# old_country_name.encode('utf-8').strip(),
# new_country_name.encode('utf-8').strip(),
# year.encode('utf-8').strip(),
# field.encode('utf-8').strip()])
#
# return df_from_lists(all_names, header_included=True)
def find_age(bs4_html):
all_names = [["name", "age"]]
# place_acq = ""
for i in bs4_html[6].find_all(['h3', 'h6']):
if "Age" in i.string:
age = i.string.split()[-1]
if "Age" not in i.string:
name = i.string
all_names.append([name.encode('utf-8'), age.encode('utf-8')])
return df_from_lists(all_names, header_included=True)
def grab_city_state(city_state, country):
'''
>>> grab_city_state(["Cardio-Pulmonary Laboratory", "Bellevue Hospital", "New York", "NY"], 'USA')
('New York', 'NY', 'Cardio-Pulmonary Laboratory, Bellevue Hospital')
>>> grab_city_state(["Bellevue Hospital", "New York", "NY"], 'USA')
('New York', 'NY', 'Bellevue Hospital')
>>> grab_city_state(['New York', 'NY'], 'USA')
('New York', 'NY', '')
>>> grab_city_state(['New York'], 'USA')
('New York', '', '')
'''
city = ""
state = ""
other = ""
if len(city_state) == 1:
city = city_state.pop()
elif len(city_state) > 1:
if country == "USA":
state = city_state.pop()
city = city_state.pop()
else:
city = city_state.pop()
# Handle a problem case of ';' in Altenberg; Grünau im Almtal
city = city.split(';')[0]
other = ", ".join(city_state)
return city.strip(), state.strip(), other.strip()
def grab_inst_country_citystate(location):
'''
>>> grab_inst_country_citystate("Edinburgh University, Edinburgh, United Kingdom")
('Edinburgh University', 'Edinburgh, United Kingdom')
>>> grab_inst_country_citystate("Fred Hutchinson Cancer Research Center, Seattle, WA, USA")
('Fred Hutchinson Cancer Research Center', 'Seattle, WA, USA')
>>> grab_inst_country_citystate("Columbia University Division, Cardio-Pulmonary Laboratory, Bellevue Hospital, New York, NY, USA")
('Bellevue Hospital', 'New York, NY, USA')
>>> grab_inst_country_citystate('Strasbourg University, Strasbourg, Alsace (then Germany, now France)')
('Strasbourg University', 'Strasbourg, France')
'''
# Handle corner case.
location = location.replace('then Germany, ', '')
# Handle record with missing data.
if location == 'Howard Hughes Medical Institute, , ':
print location
location = 'Howard Hughes Medical Institute, Chevy Chase, MD, USA'
# Many locations end with HHMI, while still having other locations.
if location[-33:] == ', Howard Hughes Medical Institute':
location = location[0:-33]
pieces = location.split(",")
pieces = [each.strip() for each in pieces]
# Many strings have two associated universities
# Some strings have 2 locations in them. Handle these differently.
# Using only the second location.
if len(pieces) >= 6:
        # If USA is present, there may well be a state.
if "USA" == pieces[-1]:
institution = pieces[-4]
city = pieces[-3]
state = pieces[-2]
country = pieces[-1]
extra_loc = ""
else:
institution = pieces[-3]
city = pieces[-2]
state = ""
country = pieces[-1]
extra_loc = ""
else:
# Otherwise, process differently
institution = pieces[0]
country = pieces[-1]
city_state = pieces[1:-1]
city, state, extra_loc = grab_city_state(city_state, country)
# Fix problem records for Google map api lookup.
if country == "USSR":
country = "Russia"
if country == "Czechoslovakia":
country = "Czech Republic"
# Don't use any 'extra location' info for now.
# institution = ', '.join(filter(None, [institution, extra_loc]))
location = ', '.join(filter(None, [city, state, country]))
location = get_current_loc(location)
return institution, location
def separate_old_country_names(country):
"""Return old and new country if applicable.
Given a string with two country names, returns the old and new names.
Args:
country: string containing country name. May have old and new names.
Returns:
string of old country name and string of current country name.
*If the country name had not changed, returns same name for both*
>>> separate_old_country_names(' Alsace (now France)')
('Alsace', 'France')
"""
old = ""
new = ""
# if " (now " in country:
# old_and_new = country.split(' (now ')
if "now " in country:
split_string = re.search('\(.*now ', country).group(0)
old_and_new = country.split(split_string)
old = old_and_new[0]
new = old_and_new[1][:-1]
else:
old = country
new = country
return old.strip(), new.strip()
def get_current_loc(location_string):
'''Returns string of updated location.
Pulls out the updated location (now newLocation) from the location
to pass to Google Maps api for lon/lat coordinates.
Args:
location_string: String with location, with possible current updates.
Returns:
string of the updated location only.
'''
location = []
if '(now ' in location_string:
temp = location_string.split(',')
location = []
for word in temp:
if "(now " in word:
word = word.split('(now ')[1].strip(')')
location.append(word)
else:
        # If '(now ' is not present, just return the original string.
return location_string
return ", ".join(word for word in location)
def map_field(x):
if x == 'The Nobel Prize in Literature':
return "literature"
elif x == 'The Nobel Prize in Chemistry':
return "chemistry"
elif x == 'The Nobel Prize in Physics':
return "physics"
elif x == 'The Nobel Prize in Physiology or Medicine':
return "physiology"
elif x == 'The Sveriges Riksbank Prize in Economic Sciences in Memory of Alfred Nobel':
return "economics"
elif x == 'The Nobel Peace Prize':
return "peace"
def df_from_lists(lists, header_included=True):
"""Makes pandas dataframe from list of lists.
"""
# Mutating global copy of list? Make a copy here.
inside_lists = lists[:]
headers = None
if header_included:
headers = inside_lists.pop(0)
df = pd.DataFrame(inside_lists, columns=headers)
return df
if __name__ == "__main__":
import doctest
doctest.testmod()
|
mit
|
dimroc/tensorflow-mnist-tutorial
|
tensorflowvisu.py
|
4
|
16385
|
# encoding: UTF-8
# Copyright 2016 Google.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
plt.style.use(["ggplot", "tensorflowvisu.mplstyle"])
#import matplotlib
#matplotlib.use('macosx') #this is the default on mac
#print("matplotlib version: " + matplotlib.__version__)
import matplotlib.animation as animation
from matplotlib import rcParams
import math
import tensorflowvisu_digits
tf.set_random_seed(0)
# number of percentile slices for histogram visualisations
HISTOGRAM_BUCKETS = 7
# X: tensor of shape [100+, 28, 28, 1] containing a batch of images (float32)
# Y: tensor of shape [100+, 10] containing recognised digits (one-hot vectors)
# Y_: tensor of shape [100+, 10] containing correct digit labels (one-hot vectors)
# return value: tensor of shape [280, 280, 3] containing the 100 first unrecognised images (rgb, uint8)
# followed by other, recognised images. 100 images max arranged as a 10x10 array. Unrecognised images
# are displayed on a red background and labeled with the correct (left) and recognised digit (right).
def tf_format_mnist_images(X, Y, Y_, n=100, lines=10):
correct_prediction = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))
correctly_recognised_indices = tf.squeeze(tf.where(correct_prediction), [1]) # indices of correctly recognised images
incorrectly_recognised_indices = tf.squeeze(tf.where(tf.logical_not(correct_prediction)), [1]) # indices of incorrectly recognised images
    everything_incorrect_first = tf.concat([incorrectly_recognised_indices, correctly_recognised_indices], 0) # images reordered with indices of unrecognised images first
everything_incorrect_first = tf.slice(everything_incorrect_first, [0], [n]) # compute first 100 only - no space to display more anyway
# compute n=100 digits to display only
Xs = tf.gather(X, everything_incorrect_first)
Ys = tf.gather(Y, everything_incorrect_first)
Ys_ = tf.gather(Y_, everything_incorrect_first)
correct_prediction_s = tf.gather(correct_prediction, everything_incorrect_first)
digits_left = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_left())
correct_tags = tf.gather(digits_left, tf.argmax(Ys_, 1)) # correct digits to be printed on the images
digits_right = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_right())
computed_tags = tf.gather(digits_right, tf.argmax(Ys, 1)) # computed digits to be printed on the images
#superimposed_digits = correct_tags+computed_tags
    superimposed_digits = tf.where(correct_prediction_s, tf.zeros_like(correct_tags),correct_tags+computed_tags) # only print the correct and computed digits on unrecognised images
correct_bkg = tf.reshape(tf.tile([1.3,1.3,1.3], [28*28]), [1, 28,28,3]) # white background
incorrect_bkg = tf.reshape(tf.tile([1.3,1.0,1.0], [28*28]), [1, 28,28,3]) # red background
recognised_bkg = tf.gather(tf.concat([incorrect_bkg, correct_bkg], 0), tf.cast(correct_prediction_s, tf.int32)) # pick either the red or the white background depending on recognised status
I = tf.image.grayscale_to_rgb(Xs)
I = ((1-(I+superimposed_digits))*recognised_bkg)/1.3 # stencil extra data on top of images and reorder them unrecognised first
I = tf.image.convert_image_dtype(I, tf.uint8, saturate=True)
Islices = [] # 100 images => 10x10 image block
for imslice in range(lines):
Islices.append(tf.concat(tf.unstack(tf.slice(I, [imslice*n//lines,0,0,0], [n//lines,28,28,3])), 1))
I = tf.concat(Islices, 0)
return I
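# Illustrative wiring sketch (not part of the original module), assuming the
# TF1-style graph mode used throughout this file. The placeholder names are
# hypothetical; feed at least n=100 images with their predicted and correct
# one-hot labels when evaluating the returned [280, 280, 3] uint8 tensor.
def _example_format_mnist_images_graph():
    X = tf.placeholder(tf.float32, [None, 28, 28, 1])   # batch of images
    Y = tf.placeholder(tf.float32, [None, 10])           # recognised digits (one-hot)
    Y_ = tf.placeholder(tf.float32, [None, 10])          # correct labels (one-hot)
    return tf_format_mnist_images(X, Y, Y_, n=100, lines=10)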
# n = HISTOGRAM_BUCKETS (global)
# Buckets the data into n buckets so that there are an equal number of data points in
# each bucket. Returns n+1 bucket boundaries. Spreads the reaminder data.size % n more
# or less evenly among the central buckets.
# data: 1-D ndarray containing float data, MUST BE SORTED in ascending order
# n: integer, the number of desired output buckets
# return value: ndarray, 1-D vector of size n+1 containing the bucket boundaries
# the first value is the min of the data, the last value is the max
def probability_distribution(data):
n = HISTOGRAM_BUCKETS
data.sort()
bucketsize = data.size // n
bucketrem = data.size % n
buckets = np.zeros([n+1])
buckets[0] = data[0] # min
buckets[-1] = data[-1] # max
buckn = 0
rem = 0
remn = 0
k = 0
cnt = 0 # only for assert
lastval = data[0]
for i in range(data.size):
val = data[i]
buckn += 1
cnt += 1
if buckn > bucketsize+rem : ## crossing bucket boundary
cnt -= 1
k += 1
buckets[k] = (val + lastval) / 2
if (k<n+1):
cnt += 1
buckn = 1 # val goes into the new bucket
if k >= (n - bucketrem) // 2 and remn < bucketrem:
rem = 1
remn += 1
else:
rem = 0
lastval = val
assert i+1 == cnt
return buckets
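# Illustrative sketch (not part of the original module): probability_distribution
# takes a 1-D float ndarray and returns HISTOGRAM_BUCKETS + 1 boundaries, the
# first being the minimum and the last the maximum of the data. The sample
# values below are hypothetical.
def _example_probability_distribution():
    data = np.linspace(0.0, 1.0, 50)      # already sorted, as required
    boundaries = probability_distribution(data)
    assert boundaries.size == HISTOGRAM_BUCKETS + 1
    assert boundaries[0] == data[0] and boundaries[-1] == data[-1]
    return boundaries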
def _empty_collection(collection):
tempcoll = []
for a in (collection):
tempcoll.append(a)
for a in (tempcoll):
collection.remove(a)
def _display_time_histogram(ax, xdata, ydata, color):
_empty_collection(ax.collections)
midl = HISTOGRAM_BUCKETS//2
midh = HISTOGRAM_BUCKETS//2
for i in range(int(math.ceil(HISTOGRAM_BUCKETS/2.0))):
ax.fill_between(xdata, ydata[:,midl-i], ydata[:,midh+1+i], facecolor=color, alpha=1.6/HISTOGRAM_BUCKETS)
if HISTOGRAM_BUCKETS % 2 == 0 and i == 0:
ax.fill_between(xdata, ydata[:,midl-1], ydata[:,midh], facecolor=color, alpha=1.6/HISTOGRAM_BUCKETS)
midl = midl-1
class MnistDataVis:
xmax = 0
y2max = 0
x1 = []
y1 = []
z1 = []
x2 = []
y2 = []
z2 = []
x3 = []
w3 = np.zeros([0,HISTOGRAM_BUCKETS+1])
b3 = np.zeros([0,HISTOGRAM_BUCKETS+1])
im1 = np.full((28*10,28*10,3),255, dtype='uint8')
im2 = np.full((28*10,28*10,3),255, dtype='uint8')
_animpause = False
_animation = None
_mpl_figure = None
_mlp_init_func = None
_mpl_update_func = None
_color4 = None
_color5 = None
def __set_title(self, ax, title, default=""):
if title is not None and title != "":
ax.set_title(title, y=1.02) # adjustment for plot title bottom margin
else:
ax.set_title(default, y=1.02) # adjustment for plot title bottom margin
# retrieve the color from the color cycle, default is 1
def __get_histogram_cyclecolor(self, colornum):
clist = rcParams['axes.prop_cycle']
ccount = 1 if (colornum is None) else colornum
colors = clist.by_key()['color']
for i, c in enumerate(colors):
if (i == ccount % 3):
return c
def __init__(self, title1=None, title2=None, title3=None, title4=None, title5=None, title6=None, histogram4colornum=None, histogram5colornum=None, dpi=70):
self._color4 = self.__get_histogram_cyclecolor(histogram4colornum)
self._color5 = self.__get_histogram_cyclecolor(histogram5colornum)
fig = plt.figure(figsize=(19.20,10.80), dpi=dpi)
plt.gcf().canvas.set_window_title("MNIST")
fig.set_facecolor('#FFFFFF')
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
ax6 = fig.add_subplot(236)
#fig, ax = plt.subplots() # if you need only 1 graph
self.__set_title(ax1, title1, default="Accuracy")
self.__set_title(ax2, title2, default="Cross entropy loss")
self.__set_title(ax3, title3, default="Training digits")
self.__set_title(ax4, title4, default="Weights")
self.__set_title(ax5, title5, default="Biases")
self.__set_title(ax6, title6, default="Test digits")
#ax1.set_figaspect(1.0)
# TODO: finish exporting the style modifications into a stylesheet
line1, = ax1.plot(self.x1, self.y1, label="training accuracy")
line2, = ax1.plot(self.x2, self.y2, label="test accuracy")
legend = ax1.legend(loc='lower right') # fancybox : slightly rounded corners
legend.draggable(True)
line3, = ax2.plot(self.x1, self.z1, label="training loss")
line4, = ax2.plot(self.x2, self.z2, label="test loss")
legend = ax2.legend(loc='upper right') # fancybox : slightly rounded corners
legend.draggable(True)
ax3.grid(False) # toggle grid off
ax3.set_axis_off()
imax1 = ax3.imshow(self.im1, animated=True, cmap='binary', vmin=0.0, vmax=1.0, interpolation='nearest', aspect=1.0)
ax6.grid(False) # toggle grid off
ax6.axes.get_xaxis().set_visible(False)
imax2 = ax6.imshow(self.im2, animated=True, cmap='binary', vmin=0.0, vmax=1.0, interpolation='nearest', aspect=1.0)
ax6.locator_params(axis='y', nbins=7)
# hack...
ax6.set_yticks([0, 280-4*56, 280-3*56, 280-2*56, 280-56, 280])
ax6.set_yticklabels(["100%", "98%", "96%", "94%", "92%", "90%"])
def _init():
ax1.set_xlim(0, 10) # initial value only, autoscaled after that
ax2.set_xlim(0, 10) # initial value only, autoscaled after that
ax4.set_xlim(0, 10) # initial value only, autoscaled after that
ax5.set_xlim(0, 10) # initial value only, autoscaled after that
ax1.set_ylim(0, 1) # important: not autoscaled
#ax1.autoscale(axis='y')
ax2.set_ylim(0, 100) # important: not autoscaled
return imax1, imax2, line1, line2, line3, line4
def _update():
# x scale: iterations
ax1.set_xlim(0, self.xmax+1)
ax2.set_xlim(0, self.xmax+1)
ax4.set_xlim(0, self.xmax+1)
ax5.set_xlim(0, self.xmax+1)
# four curves: train and test accuracy, train and test loss
line1.set_data(self.x1, self.y1)
line2.set_data(self.x2, self.y2)
line3.set_data(self.x1, self.z1)
line4.set_data(self.x2, self.z2)
#images
imax1.set_data(self.im1)
imax2.set_data(self.im2)
# histograms
_display_time_histogram(ax4, self.x3, self.w3, self._color4)
_display_time_histogram(ax5, self.x3, self.b3, self._color5)
#return changed artists
return imax1, imax2, line1, line2, line3, line4
def _key_event_handler(event):
if len(event.key) == 0:
return
else:
keycode = event.key
# pause/resume with space bar
if keycode == ' ':
self._animpause = not self._animpause
if not self._animpause:
_update()
return
            # [p, m, n]: p is the number of the subplot, [n, m] is the subplot layout
toggles = {'1':[1,1,1], # one plot
'2':[2,1,1], # one plot
'3':[3,1,1], # one plot
'4':[4,1,1], # one plot
'5':[5,1,1], # one plot
'6':[6,1,1], # one plot
'7':[12,1,2], # two plots
'8':[45,1,2], # two plots
'9':[36,1,2], # two plots
'escape':[123456,2,3], # six plots
'0':[123456,2,3]} # six plots
# other matplotlib keyboard shortcuts:
# 'o' box zoom
# 'p' mouse pan and zoom
# 'h' or 'home' reset
# 's' save
# 'g' toggle grid (when mouse is over a plot)
# 'k' toggle log/lin x axis
# 'l' toggle log/lin y axis
if not (keycode in toggles):
return
for i in range(6):
fig.axes[i].set_visible(False)
fignum = toggles[keycode][0]
if fignum <= 6:
fig.axes[fignum-1].set_visible(True)
fig.axes[fignum-1].change_geometry(toggles[keycode][1], toggles[keycode][2], 1)
ax6.set_aspect(25.0/40) # special case for test digits
elif fignum < 100:
fig.axes[fignum//10-1].set_visible(True)
fig.axes[fignum//10-1].change_geometry(toggles[keycode][1], toggles[keycode][2], 1)
fig.axes[fignum%10-1].set_visible(True)
fig.axes[fignum%10-1].change_geometry(toggles[keycode][1], toggles[keycode][2], 2)
ax6.set_aspect(1.0) # special case for test digits
elif fignum == 123456:
for i in range(6):
fig.axes[i].set_visible(True)
fig.axes[i].change_geometry(toggles[keycode][1], toggles[keycode][2], i+1)
ax6.set_aspect(1.0) # special case for test digits
plt.draw()
fig.canvas.mpl_connect('key_press_event', _key_event_handler)
self._mpl_figure = fig
self._mlp_init_func = _init
self._mpl_update_func = _update
def _update_xmax(self, x):
if (x > self.xmax):
self.xmax = x
def _update_y2max(self, y):
if (y > self.y2max):
self.y2max = y
def append_training_curves_data(self, x, accuracy, loss):
self.x1.append(x)
self.y1.append(accuracy)
self.z1.append(loss)
self._update_xmax(x)
def append_test_curves_data(self, x, accuracy, loss):
self.x2.append(x)
self.y2.append(accuracy)
self.z2.append(loss)
self._update_xmax(x)
self._update_y2max(accuracy)
def get_max_test_accuracy(self):
return self.y2max
def append_data_histograms(self, x, datavect1, datavect2, title1=None, title2=None):
self.x3.append(x)
datavect1.sort()
self.w3 = np.concatenate((self.w3, np.expand_dims(probability_distribution(datavect1), 0)))
datavect2.sort()
self.b3 = np.concatenate((self.b3, np.expand_dims(probability_distribution(datavect2), 0)))
self._update_xmax(x)
def update_image1(self, im):
self.im1 = im
def update_image2(self, im):
self.im2 = im
def is_paused(self):
return self._animpause
def animate(self, compute_step, iterations, train_data_update_freq=20, test_data_update_freq=100, one_test_at_start=True, more_tests_at_start=False, save_movie=False):
def animate_step(i):
if (i == iterations // train_data_update_freq): #last iteration
compute_step(iterations, True, True)
else:
for k in range(train_data_update_freq):
n = i * train_data_update_freq + k
request_data_update = (n % train_data_update_freq == 0)
request_test_data_update = (n % test_data_update_freq == 0) and (n > 0 or one_test_at_start)
if more_tests_at_start and n < test_data_update_freq: request_test_data_update = request_data_update
compute_step(n, request_test_data_update, request_data_update)
# makes the UI a little more responsive
plt.pause(0.001)
if not self.is_paused():
return self._mpl_update_func()
self._animation = animation.FuncAnimation(self._mpl_figure, animate_step, int(iterations // train_data_update_freq + 1), init_func=self._mlp_init_func, interval=16, repeat=False, blit=False)
if save_movie:
mywriter = animation.FFMpegWriter(fps=24, codec='libx264', extra_args=['-pix_fmt', 'yuv420p', '-profile:v', 'high', '-tune', 'animation', '-crf', '18'])
self._animation.save("./tensorflowvisu_video.mp4", writer=mywriter)
else:
plt.show(block=True)
|
apache-2.0
|
IssamLaradji/scikit-learn
|
examples/exercises/plot_cv_diabetes.py
|
15
|
2528
|
"""
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
|
bsd-3-clause
|
simon-pepin/scikit-learn
|
sklearn/metrics/pairwise.py
|
104
|
42995
|
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
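# Illustrative check (not part of the original function): the expansion used
# above, dist(x, y)^2 = dot(x, x) - 2 * dot(x, y) + dot(y, y), agrees with the
# naive pairwise computation up to floating point rounding. The small arrays
# below are hypothetical.
def _example_euclidean_expansion_check():
    X = np.array([[0., 1.], [1., 1.]])
    Y = np.array([[2., 0.]])
    fast = euclidean_distances(X, Y, squared=True)
    naive = ((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(axis=2)
    return np.allclose(fast, naive)  # True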
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
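# Illustrative sketch (not part of the original function): for each row of X
# the routine above returns the index of the nearest row of Y together with
# the corresponding distance, i.e. the argmin/min of the full distance matrix
# computed chunk by chunk. The arrays below are hypothetical.
def _example_pairwise_argmin_min():
    X = np.array([[0., 0.], [3., 3.]])
    Y = np.array([[0., 1.], [2., 2.]])
    indices, values = pairwise_distances_argmin_min(X, Y)
    # indices -> array([0, 1]); values -> array([1., 1.41421356])
    return indices, values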
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (i.e. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half the squared
    euclidean distance if each sample is normalized to unit norm.
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
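# Illustrative check (not part of the original function) of the note above: on
# unit-normalized rows the cosine distance equals half the squared euclidean
# distance. The arrays below are hypothetical.
def _example_cosine_half_squared_euclidean():
    X = normalize(np.array([[1., 2.], [3., 4.]]))
    Y = normalize(np.array([[2., 1.], [4., 3.]]))
    cosine = paired_cosine_distances(X, Y)
    half_squared = .5 * row_norms(X - Y, squared=True)
    return np.allclose(cosine, half_squared)  # True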
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
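# Illustrative check (not part of the original function): for a single pair of
# rows the kernel value equals exp(-gamma * ||x - y||^2). The vectors and the
# gamma value below are hypothetical.
def _example_rbf_single_pair():
    x = np.array([[0., 0.]])
    y = np.array([[1., 2.]])
    gamma = 0.5
    direct = np.exp(-gamma * np.sum((x - y) ** 2))
    return np.allclose(rbf_kernel(x, y, gamma=gamma), direct)  # True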
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
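# Illustrative check (not part of the original function) of the statement
# above: on L2-normalized input, cosine_similarity coincides with
# linear_kernel. The data below is hypothetical.
def _example_cosine_equals_linear_on_normalized():
    X = normalize(np.array([[1., 0., 2.], [0., 3., 4.]]))
    return np.allclose(cosine_similarity(X), linear_kernel(X, X))  # True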
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
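# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# chi2_kernel is simply exp(gamma * additive_chi2_kernel(X, Y)); the additive
# kernel is <= 0, so the exponentiated kernel lies in (0, 1]. A minimal check on
# small non-negative "histogram" rows, assuming the public sklearn API.
def _example_chi2_kernels():
    import numpy as np
    from sklearn.metrics.pairwise import additive_chi2_kernel, chi2_kernel
    X = np.array([[0.2, 0.8], [0.5, 0.5]])
    K_add = additive_chi2_kernel(X)
    K_exp = chi2_kernel(X, gamma=1.0)
    assert np.allclose(K_exp, np.exp(1.0 * K_add))
    assert np.all(K_add <= 0) and np.all((K_exp > 0) & (K_exp <= 1))
    return K_add, K_exp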
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
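# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# pairwise_distances accepts a metric name or a callable. A minimal sketch with
# both forms, assuming the public sklearn.metrics.pairwise_distances API; the
# callable below is a hypothetical user-supplied metric, not part of scikit-learn.
def _example_pairwise_distances():
    import numpy as np
    from sklearn.metrics import pairwise_distances
    X = np.array([[0., 0.], [3., 4.]])
    D_euc = pairwise_distances(X, metric="euclidean")
    # A user-supplied callable taking two 1-D rows and returning a scalar.
    chebyshev_like = lambda a, b: np.max(np.abs(a - b))
    D_cal = pairwise_distances(X, metric=chebyshev_like)
    assert np.isclose(D_euc[0, 1], 5.0) and np.isclose(D_cal[0, 1], 4.0)
    return D_euc, D_cal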
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
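# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# pairwise_kernels dispatches on the metric string and, with filter_params=True,
# drops keyword arguments the chosen kernel does not accept. A minimal sketch,
# assuming the public sklearn.metrics.pairwise_kernels API.
def _example_pairwise_kernels():
    import numpy as np
    from sklearn.metrics.pairwise import pairwise_kernels, rbf_kernel
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    # 'degree' is not an rbf parameter; filter_params=True silently removes it.
    K = pairwise_kernels(X, metric="rbf", filter_params=True, gamma=0.1, degree=3)
    assert np.allclose(K, rbf_kernel(X, gamma=0.1))
    return K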
|
bsd-3-clause
|
boada/vpCluster
|
data/boada/analysis_all/mkClusterResults.py
|
1
|
9117
|
import pandas as pd
from glob import glob
import numpy as np
from astLib import astStats as ast
from astLib import astCalc as aca
from sklearn import mixture
import h5py as hdf
import emcee
import sys
# buzzard simulation cosmology
aca.H0 = 70
aca.OMEGA_M0 = 0.286
aca.OMEGA_L0 = 0.714
# millennium cosmology
#aca.H0 = 73
#aca.OMEGA_M0 = 0.25
#aca.OMEGA_L0 = 0.75
def findClusterCenterRedshift(data, errors=False):
''' Finds the center of the cluster in redshift space using an
error-weighted mean redshift (the biweight location estimator is left
commented out below) and returns it. If errors = True, this also returns
the 95% C.I. from 1000 bootstrap shuffles.
'''
x = np.copy(data.redshift[data.interloper == 'NO'].values)
w = np.copy(data.redshift_err[data.interloper == 'NO'].values) * 2.  # keep errors aligned with the cut on x
#avgz = ast.biweightLocation(x, tuningConstant=6.0)
#return ast.biweightClipped(data['Z'], 6.0, 3)['biweightLocation']
#return ast.biweightLocation(data['Z'], tuningConstant=6.0)
avgz = np.average(x, weights=1. / w)
#print len(x)
if errors:
ci = ast.bootstrap(x, np.average, weights=1. / w)
return avgz, ci
else:
return avgz
def calcLOSVD(data, errors=False):
''' Using the previously computed LOSVs we will find the LOSVD. This will
give us a few options to do that based on the number of objects that we
have. If errors = True, then this will return the 95% C.I. from 1000
bootstrap shuffles.
'''
if data.interloper.value_counts().NO >= 15:
x = np.copy(data.LOSV[data.interloper == 'NO'].values)
LOSVD = ast.biweightScale_test(x, tuningConstant=9.0)
if errors:
ci = ast.bootstrap(x, ast.biweightScale_test, tuningConstant=9.0)
return LOSVD, ci
else:
return LOSVD
else:
x = np.copy(data.LOSV[data.interloper == 'NO'].values)
LOSVD = ast.gapperEstimator(x)
if errors:
ci = ast.bootstrap(x, ast.gapperEstimator)
return LOSVD, ci
else:
return LOSVD
def calc_mass_Evrard(data, A1D=1177., alpha=0.364):
''' This uses the relation from Munari2013 to calculate the halo mass from
the observed velocity dispersion. The chosen scaling relations are from
their table 1 which has been calibrated using galaxies and not dark matter
halos only.
'''
avgz = findClusterCenterRedshift(data)
vd = data.LOSVD.values[0]
if avgz is None:
pass
else:
return 1e15 / (aca.H0 * aca.Ez(avgz) / 100.) * (vd / A1D)**(1 / alpha)
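# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The Munari et al. (2013) style scaling used above is
#   M ~ 1e15 / h(z) * (sigma_1D / A1D)**(1/alpha),   h(z) = H0 * E(z) / 100,
# so a cluster with sigma_1D ~ 1000 km/s at low redshift comes out near 1e15 Msun.
# A numpy-only illustration with a hand-coded flat-LCDM E(z) standing in for
# astLib's aca.Ez; the cosmology values mirror the buzzard setup at the file top.
def _example_mass_scaling(sigma_1d=1000., z=0.1, H0=70., A1D=1177., alpha=0.364):
    import numpy as np
    Ez = np.sqrt(0.286 * (1 + z) ** 3 + 0.714)  # flat LCDM E(z)
    hz = H0 * Ez / 100.
    return 1e15 / hz * (sigma_1d / A1D) ** (1 / alpha)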
def findLOSVDgmm(data):
''' Finds the LOSVD using a gaussian mixture model. Not currently used. '''
LOSV = data['LOSV'][:, np.newaxis]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 4)
cv_types = ['spherical', 'tied', 'diag', 'full']
#cv_types = ['diag']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components,
covariance_type=cv_type,
n_init=10)
gmm.fit(LOSV)
bic.append(gmm.bic(LOSV))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
# figure things out -- this comes from the wikipedia article on mixture
# distributions. I have no idea if it is completely trustworthy.
#covars = best_gmm.covars_.ravel()
#weights = best_gmm.weights_.ravel()
#means = best_gmm.means_.ravel()
#wmeans = np.sum(weights*means)
#parts = weights * ((means - wmeans)**2 + covars)
#newLOSVD = np.sqrt(np.sum(parts))
## now we resample and then see
dx = np.linspace(LOSV.min() - 100, LOSV.max() + 100, 1000)
logprob, responsibilities = best_gmm.score_samples(dx[:, np.newaxis])
pdf = np.exp(logprob)
normedPDF = pdf / np.sum(pdf)
u = np.sum(dx * normedPDF)
data['LOSVDgmm'] = np.sqrt(np.sum(normedPDF * (dx - u)**2))
return data
def findLOSVDmcmc(data):
''' Find the LOSVD and mean velocity using the MCMC and the likelihood
function from walker2006. This tends to work better than any of the other
methods and is the method we are using for the DES paper.
'''
def log_prior(theta, LOSV):
sigma, mu = theta
if not 50 < sigma < 1400:
return -np.inf
if not LOSV.min() < mu < LOSV.max():
return -np.inf
return 1
def log_likelihood(theta, LOSV, LOSV_err):
sigma, mu = theta
#print(theta)
# break long equation into three parts
a = -0.5 * np.sum(np.log(LOSV_err**2 + sigma**2))
b = -0.5 * np.sum((LOSV - mu)**2 / (LOSV_err**2 + sigma**2))
c = -1. * LOSV.size / 2. * np.log(2 * np.pi)
return a + b + c
def log_posterior(theta, LOSV, LOSV_err):
lp = log_prior(theta, LOSV)
if not np.isfinite(lp):
return -np.inf
return lp + log_likelihood(theta, LOSV, LOSV_err)
# get data
LOSV = data['LOSV'].values
try:
LOSV_err = data['LOSV_err'].values
except KeyError:
LOSV_err = np.zeros_like(LOSV)
ndim = 2 # number of parameters in the model
nwalkers = 40 # number of MCMC walkers
nburn = 50 # "burn-in" period to let chains stabilize
nsteps = 300 # number of MCMC steps to take
# set theta near the maximum likelihood, with
np.random.seed()
m = np.random.normal(np.mean(LOSV), scale=1, size=(nwalkers))
s = np.random.normal(200, scale=1, size=(nwalkers))
starting_guesses = np.vstack([s, m]).T
sampler = emcee.EnsembleSampler(nwalkers,
ndim,
log_posterior,
args=[LOSV, LOSV_err])
sampler.run_mcmc(starting_guesses, nsteps)
samples = sampler.chain[:, nburn:, :].reshape((-1, ndim))
sigma_rec, mean_rec = map(
lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
zip(*np.percentile(samples, [16, 50, 84], axis=0)))
#data['LOSVD'] = sigma_rec[0]
#data['LOSVD_err'] = sigma_rec[1], sigma_rec[2]
#return data, samples
return sigma_rec, samples
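# --- Illustrative sketch (added for clarity; not part of the original script) ---
# The likelihood sampled above treats each LOSV as drawn from N(mu, sigma^2 + err_i^2).
# A numpy-only grid evaluation of that log-likelihood on synthetic velocities,
# without emcee, just to show the shape of the objective being sampled.
def _example_losvd_loglike():
    import numpy as np
    rng = np.random.RandomState(42)
    true_sigma, true_mu = 600., 0.
    losv_err = np.full(50, 50.)
    losv = rng.normal(true_mu, np.sqrt(true_sigma ** 2 + losv_err ** 2))
    def loglike(sigma, mu):
        var = losv_err ** 2 + sigma ** 2
        return (-0.5 * np.sum(np.log(var))
                - 0.5 * np.sum((losv - mu) ** 2 / var)
                - 0.5 * losv.size * np.log(2 * np.pi))
    sigmas = np.linspace(100., 1200., 45)
    best = sigmas[np.argmax([loglike(s, losv.mean()) for s in sigmas])]
    return best  # should land in the vicinity of true_sigma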
if __name__ == "__main__":
''' This takes the membership information from the member catalogs and
computes the LOSVD and cluster mass from the member galaxies. Right now it
is using a simple power law to do this, but things will change in the
future when I add the stuff from the des study.
'''
if len(sys.argv) == 1:
# make results container
results = np.zeros(
(10, ),
dtype=[('ID', 'a', 25), ('SOURCES', '>i4'), ('Q0', '>i4'),
('Q1', '>i4'), ('MEMBERS', '>i4'), ('Zc', '>f4'),
('Zc_err', '>f4'), ('LOSVD', '>f4'), ('LOSVD_err', '>f4'),
('LOSVD_dist', '>f4',
(10000, )), ('MASS', '>f4'), ('MASS_err', '>f4')])
clusters = glob('./members/*_members.csv')
results['ID'] = [c.split('/')[-1].replace('_members.csv', '')
                 for c in clusters]
for i, c in enumerate(clusters):
data = pd.read_csv(c)
results['Q0'][i] = data[data.Q == 0].shape[0]
results['Q1'][i] = data[data.Q == 1].shape[0]
allobs = pd.read_csv('./redshifts/%s_redshifts.csv' %
results['ID'][i])
results['SOURCES'][i] = allobs[~np.isnan(allobs.Q)].shape[0]
# filter out the non-members
data = data[data.interloper == 'NO']
results['MEMBERS'][i] = data.shape[0]
# find redshift
clusz = findClusterCenterRedshift(data, errors=True)
results['Zc'][i] = clusz[0]
# errors on a weighted mean, Eq. 4.19, Bevington
results['Zc_err'][i] = 1. / np.sqrt(np.sum(
1. / data.redshift_err.values))
# calculate the LOSVD with the mcmc, without error to start
LOSVD, LOSVD_dist = findLOSVDmcmc(data)
results['LOSVD'][i] = LOSVD[0]
results['LOSVD_dist'][i] = LOSVD_dist[:, 0] # only LOSVD column
results['LOSVD_err'][i] = np.std(LOSVD_dist[:, 0])
# calculate the mass
data['LOSVD'] = LOSVD[0]
results['MASS'][i] = calc_mass_Evrard(data)
# propagation of errors, Bevington
results['MASS_err'][i] = results['MASS'][i]/0.364 *\
(results['LOSVD_err'][i]/ results['LOSVD'][i])
results['MASS_err'] = 0.434 * results['MASS_err'] / results['MASS']
results['MASS'] = np.log10(results['MASS'])
with hdf.File('results_cluster.hdf5', 'w') as f:
f['cluster props'] = results
f.flush()
elif len(sys.argv) == 2:
cluster = './members/' + sys.argv[1] + '_members.csv'
data = pd.read_csv(cluster)
mass = calc_mass_Evrard(data)
cluster = cluster.replace('_members.csv', '')
print cluster, mass / 1e15
|
mit
|
pianomania/scikit-learn
|
sklearn/naive_bayes.py
|
20
|
30830
|
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <[email protected]>
# Minor fixes by Fabian Pedregosa
# Amit Aides <[email protected]>
# Yehuda Finkelstein <[email protected]>
# Lars Buitinck
# Jan Hendrik Metzen <[email protected]>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array, check_consistent_length
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like, shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB(priors=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB(priors=None)
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, priors=None):
self.priors = priors
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,), optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool, optional (default=False)
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
check_consistent_length(y, sample_weight)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
epsilon = 1e-9 * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
n_classes = len(self.classes_)
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
# Check that the provided priors match the number of classes
if len(priors) != n_classes:
raise ValueError('Number of priors must match number of'
' classes.')
# Check that the sum is 1
if not np.isclose(priors.sum(), 1.0):
raise ValueError('The sum of the priors should be 1.')
# Check that the prior are non-negative
if (priors < 0).any():
raise ValueError('Priors must be non-negative.')
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_),
dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
# Update only if no priors were provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
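# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A minimal numerical check that the online mean/variance update used by
# GaussianNB.partial_fit reproduces the batch statistics when the data is fed
# in two unweighted chunks (calls the @staticmethod defined above).
def _example_online_mean_variance():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(100, 3)
    A, B = X[:60], X[60:]
    mu, var = GaussianNB._update_mean_variance(0, np.zeros(3), np.zeros(3), A)
    mu, var = GaussianNB._update_mean_variance(A.shape[0], mu, var, B)
    assert np.allclose(mu, X.mean(axis=0)) and np.allclose(var, X.var(axis=0))
    return mu, var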
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_) -
np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes], optional (default=None)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior to any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial_fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional (default=None)
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,), optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T) +
self.class_log_prior_)
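# --- Illustrative sketch (added for clarity; not part of the original module) ---
# For MultinomialNB the joint log likelihood is linear in the counts:
#   jll = X @ feature_log_prob_.T + class_log_prior_
# which is why coef_ / intercept_ expose it as a linear model. A minimal check,
# assuming the public sklearn.naive_bayes.MultinomialNB API.
def _example_multinomial_linear_view():
    import numpy as np
    from sklearn.naive_bayes import MultinomialNB
    rng = np.random.RandomState(1)
    X = rng.randint(5, size=(20, 6))
    y = np.arange(20) % 3  # ensure all three classes are present
    clf = MultinomialNB().fit(X, y)
    jll = X.dot(clf.feature_log_prob_.T) + clf.class_log_prior_
    assert np.array_equal(clf.predict(X), clf.classes_[jll.argmax(axis=1)])
    return jll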
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional (default=0.0)
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean, optional (default=True)
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,], optional (default=None)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
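# --- Illustrative sketch (added for clarity; not part of the original module) ---
# BernoulliNB binarizes its input at `binarize` before counting, so passing raw
# counts with binarize=0.0 is equivalent to passing (counts > 0) with
# binarize=None. A minimal check, assuming the public sklearn API.
def _example_bernoulli_binarize():
    import numpy as np
    from sklearn.naive_bayes import BernoulliNB
    rng = np.random.RandomState(2)
    X = rng.randint(3, size=(12, 8))
    Xb = (X > 0).astype(float)
    y = np.arange(12) % 2
    p1 = BernoulliNB(binarize=0.0).fit(X, y).predict_proba(X)
    p2 = BernoulliNB(binarize=None).fit(Xb, y).predict_proba(Xb)
    assert np.allclose(p1, p2)
    return p1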
|
bsd-3-clause
|
hvasbath/beat
|
beat/models/seismic.py
|
1
|
49094
|
from logging import getLogger
import os
import copy
from time import time
import numpy as num
from theano import shared
from theano import config as tconfig
import theano.tensor as tt
from theano.printing import Print
from pyrocko.gf import LocalEngine
from pyrocko.trace import Trace
from beat import theanof, utility
from beat.ffi import load_gf_library, get_gf_prefix
from beat import config as bconfig
from beat import heart, covariance as cov
from beat.models.base import \
ConfigInconsistentError, Composite, FaultGeometryNotFoundError
from beat.models.distributions import multivariate_normal_chol, get_hyper_name
from pymc3 import Uniform, Deterministic
from collections import OrderedDict
logger = getLogger('seismic')
__all__ = [
'SeismicGeometryComposite',
'SeismicDistributerComposite']
class SeismicComposite(Composite):
"""
Comprises how to solve the non-linear seismic forward model.
Parameters
----------
sc : :class:`config.SeismicConfig`
configuration object containing seismic setup parameters
events: list
of :class:`pyrocko.model.Event`
project_dir : str
directory of the model project, where to find the data
hypers : boolean
if true initialise object for hyper parameter optimization
"""
_datasets = None
_weights = None
_targets = None
_hierarchicalnames = None
def __init__(self, sc, events, project_dir, hypers=False):
super(SeismicComposite, self).__init__(events)
logger.debug('Setting up seismic structure ...\n')
self.name = 'seismic'
self._like_name = 'seis_like'
self.correction_name = 'time_shift'
self.engine = LocalEngine(
store_superdirs=[sc.gf_config.store_superdir])
if sc.responses_path is not None:
responses_path = os.path.join(
sc.responses_path, bconfig.response_file_name)
else:
responses_path = sc.responses_path
# load data
self.datahandlers = []
for i in range(self.nevents):
seismic_data_path = os.path.join(
project_dir, bconfig.multi_event_seismic_data_name(i))
logger.info(
'Loading seismic data for event %i'
' from: %s ' % (i, seismic_data_path))
self.datahandlers.append(
heart.init_datahandler(
seismic_config=sc,
seismic_data_path=seismic_data_path,
responses_path=responses_path))
self.noise_analyser = cov.SeismicNoiseAnalyser(
structure=sc.noise_estimator.structure,
pre_arrival_time=sc.noise_estimator.pre_arrival_time,
engine=self.engine,
events=self.events,
chop_bounds=['b', 'c'])
self.wavemaps = []
for i, wc in enumerate(sc.waveforms):
if wc.include:
wmap = heart.init_wavemap(
waveformfit_config=wc,
datahandler=self.datahandlers[wc.event_idx],
event=self.events[wc.event_idx],
mapnumber=i)
self.wavemaps.append(wmap)
else:
logger.info(
'The waveform defined in "%s %i" config is not '
'included in the optimization!' % (wc.name, i))
if hypers:
self._llks = []
for t in range(self.n_t):
self._llks.append(
shared(
num.array([1.]), name='seis_llk_%i' % t, borrow=True))
def _hyper2wavemap(self, hypername):
dummy = '_'.join(hypername.split('_')[1:-1])
for wmap in self.wavemaps:
if wmap._mapid == dummy:
return wmap
raise ValueError(
'No waveform mapping found for hyperparameter! %s' % hypername)
def get_hypersize(self, hp_name):
"""
Return size of the hyperparameter
Parameters
----------
hp_name: str
of hyperparameter name
Returns
-------
int
"""
if self.config.dataset_specific_residual_noise_estimation:
wmap = self._hyper2wavemap(hp_name)
return wmap.hypersize
else:
return 1
def __getstate__(self):
self.engine.close_cashed_stores()
return self.__dict__.copy()
def analyse_noise(self, tpoint=None, chop_bounds=['b', 'c']):
"""
Analyse seismic noise in datatraces and set
data-covariance matrixes accordingly.
"""
if self.config.noise_estimator.structure == 'non-toeplitz':
results = self.assemble_results(
tpoint, order='wmap', chop_bounds=chop_bounds)
else:
results = [None] * len(self.wavemaps)
for wmap, wmap_results in zip(self.wavemaps, results):
logger.info(
'Retrieving seismic data-covariances with structure "%s" '
'for %s ...' % (
self.config.noise_estimator.structure, wmap._mapid))
cov_ds_seismic = self.noise_analyser.get_data_covariances(
wmap=wmap, results=wmap_results,
sample_rate=self.config.gf_config.sample_rate,
chop_bounds=chop_bounds)
for j, trc in enumerate(wmap.datasets):
if trc.covariance is None:
trc.covariance = heart.Covariance(data=cov_ds_seismic[j])
else:
trc.covariance.data = cov_ds_seismic[j]
if int(trc.covariance.data.sum()) == trc.data_len():
logger.warning(
'Data covariance is identity matrix!'
' Please double check!!!')
def init_hierarchicals(self, problem_config):
"""
Initialise random variables for temporal station corrections.
"""
hierarchicals = problem_config.hierarchicals
self._hierarchicalnames = []
if not self.config.station_corrections and \
self.correction_name in hierarchicals:
raise ConfigInconsistentError(
'Station corrections disabled, but they are defined'
' in the problem configuration!')
if self.config.station_corrections and \
self.correction_name not in hierarchicals:
raise ConfigInconsistentError(
'Station corrections enabled, but they are not defined'
' in the problem configuration!')
if self.correction_name in hierarchicals:
logger.info(
'Estimating time shift for each station and waveform map...')
for wmap in self.wavemaps:
hierarchical_name = wmap.time_shifts_id
nhierarchs = len(wmap.get_station_names())
logger.info(
'For %s with %i shifts' % (
hierarchical_name, nhierarchs))
if hierarchical_name in hierarchicals:
logger.info(
'Using wavemap specific imported:'
' %s ' % hierarchical_name)
param = hierarchicals[hierarchical_name]
else:
logger.info('Using global %s' % self.correction_name)
param = copy.deepcopy(
problem_config.hierarchicals[self.correction_name])
param.lower = num.repeat(param.lower, nhierarchs)
param.upper = num.repeat(param.upper, nhierarchs)
param.testvalue = num.repeat(param.testvalue, nhierarchs)
if hierarchical_name not in self.hierarchicals:
if not num.array_equal(
param.lower, param.upper):
kwargs = dict(
name=hierarchical_name,
shape=param.dimension,
lower=param.lower,
upper=param.upper,
testval=param.testvalue,
transform=None,
dtype=tconfig.floatX)
try:
self.hierarchicals[
hierarchical_name] = Uniform(**kwargs)
except TypeError:
kwargs.pop('name')
self.hierarchicals[hierarchical_name] = \
Uniform.dist(**kwargs)
self._hierarchicalnames.append(
hierarchical_name)
else:
logger.info(
'not solving for %s, got fixed at %s' % (
param.name,
utility.list2string(
param.lower.flatten())))
self.hierarchicals[
hierarchical_name] = param.lower
def export(self, point, results_path, stage_number,
fix_output=False, force=False, update=False,
chop_bounds=['b', 'c']):
"""
Save results for given point to result path.
"""
def save_covs(wmap, cov_mat='pred_v'):
"""
Save covariance matrixes of given attribute
"""
covs = {
utility.list2string(dataset.nslc_id):
getattr(dataset.covariance, cov_mat)
for dataset in wmap.datasets}
outname = os.path.join(
results_path, '%s_C_%s_%s' % (
'seismic', cov_mat, wmap._mapid))
logger.info('"%s" to: %s' % (wmap._mapid, outname))
num.savez(outname, **covs)
from pyrocko import io
# synthetics and data
results = self.assemble_results(point, chop_bounds=chop_bounds)
for traces, attribute in heart.results_for_export(
results=results, datatype='seismic'):
filename = '%s_%i.mseed' % (attribute, stage_number)
outpath = os.path.join(results_path, filename)
try:
io.save(traces, outpath, overwrite=force)
except io.mseed.CodeTooLong:
if fix_output:
for tr in traces:
tr.set_station(tr.station[-5::])
tr.set_location(
str(self.config.gf_config.reference_model_idx))
io.save(traces, outpath, overwrite=force)
else:
raise ValueError(
'Some station codes are too long! '
'(the --fix_output option will truncate to '
'last 5 characters!)')
# export stdz residuals
self.analyse_noise(point, chop_bounds=chop_bounds)
if update:
logger.info('Saving velocity model covariance matrixes...')
self.update_weights(point, chop_bounds=chop_bounds)
for wmap in self.wavemaps:
save_covs(wmap, 'pred_v')
logger.info('Saving data covariance matrixes...')
for wmap in self.wavemaps:
save_covs(wmap, 'data')
def init_weights(self):
"""
Initialise shared weights in wavemaps.
"""
logger.info('Initialising weights ...')
for wmap in self.wavemaps:
weights = []
for j, trc in enumerate(wmap.datasets):
icov = trc.covariance.chol_inverse
weights.append(
shared(
icov,
name='seis_%s_weight_%i' % (wmap._mapid, j),
borrow=True))
wmap.add_weights(weights)
def get_all_station_names(self):
"""
Returns list of station names in the order of wavemaps.
"""
us = []
for wmap in self.wavemaps:
us.extend(wmap.get_station_names())
return us
def get_unique_time_shifts_ids(self):
"""
Return unique time_shifts ids from wavemaps, which are keys to
hierarchical RVs of station corrections
"""
ts = []
for wmap in self.wavemaps:
ts.append(wmap.time_shifts_id)
return utility.unique_list(ts)
def get_unique_station_names(self):
"""
Return unique station names from all wavemaps
"""
return utility.unique_list(self.get_all_station_names())
@property
def n_t(self):
return sum(wmap.n_t for wmap in self.wavemaps)
@property
def datasets(self):
if self._datasets is None:
ds = []
for wmap in self.wavemaps:
ds.extend(wmap.datasets)
self._datasets = ds
return self._datasets
@property
def weights(self):
if self._weights is None or len(self._weights) == 0:
ws = []
for wmap in self.wavemaps:
if wmap.weights:
ws.extend(wmap.weights)
self._weights = ws
return self._weights
@property
def targets(self):
if self._targets is None:
ts = []
for wmap in self.wavemaps:
ts.extend(wmap.targets)
self._targets = ts
return self._targets
def assemble_results(
self, point, chop_bounds=['a', 'd'], order='list',
outmode='stacked_traces'):
"""
Assemble seismic traces for given point in solution space.
Parameters
----------
point : :func:`pymc3.Point`
Dictionary with model parameters
Returns
-------
List with :class:`heart.SeismicResult`
"""
if point is None:
raise ValueError('A point has to be provided!')
logger.debug('Assembling seismic waveforms ...')
syn_proc_traces, obs_proc_traces = self.get_synthetics(
point, outmode=outmode,
chop_bounds=chop_bounds, order='wmap')
# will yield exactly the same as previous call needs wmap.prepare data
# to be aware of taper_tolerance_factor
# DEPRECATED but keep for now
# syn_filt_traces, obs_filt_traces = self.get_synthetics(
# point, outmode=outmode, taper_tolerance_factor=0.,
# chop_bounds=chop_bounds, order='wmap')
# syn_filt_traces, obs_filt_traces = syn_proc_traces, obs_proc_traces
#from pyrocko import trace
#trace.snuffle(syn_proc_traces + obs_proc_traces)
results = []
for i, wmap in enumerate(self.wavemaps):
wc = wmap.config
at = wc.arrival_taper
wmap_results = []
for j, obs_tr in enumerate(obs_proc_traces[i]):
taper = at.get_pyrocko_taper(
float(obs_tr.tmin - at.a))
if outmode != 'tapered_data':
source_contributions = [syn_proc_traces[i][j]]
else:
source_contributions = syn_proc_traces[i][j]
wmap_results.append(heart.SeismicResult(
point=point,
processed_obs=obs_tr,
source_contributions=source_contributions,
taper=taper))
if order == 'list':
results.extend(wmap_results)
elif order == 'wmap':
results.append(wmap_results)
else:
raise ValueError('Order "%s" is not supported' % order)
return results
def update_llks(self, point):
"""
Update posterior likelihoods of the composite with respect to one point
in the solution space.
Parameters
----------
point : dict
with numpy array-like items and variable name keys
"""
results = self.assemble_results(point, chop_bounds=['b', 'c'])
for k, result in enumerate(results):
choli = self.datasets[k].covariance.chol_inverse
tmp = choli.dot(result.processed_res.ydata)
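            # squared weighted residual norm: tmp.T tmp == r.T C^-1 r, assuming
            # chol_inverse is the inverse Cholesky factor of the data covariance C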
_llk = num.asarray([num.dot(tmp, tmp)])
self._llks[k].set_value(_llk)
def get_standardized_residuals(self, point, chop_bounds=['b', 'c']):
"""
Parameters
----------
point : dict
with parameters to point in solution space to calculate
standardized residuals
Returns
-------
dict of arrays of standardized residuals,
keys are nslc_ids
"""
results = self.assemble_results(
point, order='list', chop_bounds=chop_bounds)
self.update_weights(point, chop_bounds=chop_bounds)
counter = utility.Counter()
hp_specific = self.config.dataset_specific_residual_noise_estimation
stdz_res = OrderedDict()
for data_trc, result in zip(self.datasets, results):
hp_name = get_hyper_name(data_trc)
if hp_specific:
hp = point[hp_name][counter(hp_name)]
else:
hp = point[hp_name]
choli = num.linalg.inv(
data_trc.covariance.chol * num.exp(hp) / 2.)
stdz_res[data_trc.nslc_id] = choli.dot(
result.processed_res.get_ydata())
return stdz_res
def get_variance_reductions(
self, point, results=None, weights=None, chop_bounds=['a', 'd']):
"""
Parameters
----------
point : dict
with parameters to point in solution space to calculate
variance reductions
Returns
-------
dict of floats,
keys are nslc_ids
"""
if results is None:
results = self.assemble_results(
point, order='list', chop_bounds=chop_bounds)
ndatasets = len(self.datasets)
assert len(results) == ndatasets
if weights is None:
self.analyse_noise(point, chop_bounds=chop_bounds)
self.update_weights(point, chop_bounds=chop_bounds)
weights = self.weights
nweights = len(weights)
assert nweights == ndatasets
logger.debug(
'n weights %i , n datasets %i' % (nweights, ndatasets))
logger.debug('Calculating variance reduction for solution ...')
var_reds = OrderedDict()
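        # per-dataset variance reduction computed below:
        # VR = 1 - (r.T C^-1 r) / (d.T C^-1 d), with residual r, observed data d
        # and data covariance C (icov = C^-1)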
for data_trc, weight, result in zip(
self.datasets, weights, results):
icov = data_trc.covariance.inverse
data = result.processed_obs.get_ydata()
residual = result.processed_res.get_ydata()
nom = residual.T.dot(icov).dot(residual)
denom = data.T.dot(icov).dot(data)
logger.debug('nom %f, denom %f' % (float(nom), float(denom)))
var_red = 1 - (nom / denom)
nslc_id = utility.list2string(data_trc.nslc_id)
logger.debug(
'Variance reduction for %s is %f' % (nslc_id, var_red))
if 0:
from matplotlib import pyplot as plt
fig, ax = plt.subplots(1, 1)
im = ax.imshow(data_trc.covariance.data)
plt.colorbar(im)
plt.show()
var_reds[nslc_id] = var_red
return var_reds
class SeismicGeometryComposite(SeismicComposite):
"""
Comprises how to solve the non-linear seismic forward model.
Parameters
----------
sc : :class:`config.SeismicConfig`
configuration object containing seismic setup parameters
project_dir : str
directory of the model project, where to find the data
sources : list
of :class:`pyrocko.gf.seismosizer.Source`
events : list
of :class:`pyrocko.model.Event`
contains information of reference event(s), coordinates of reference
point(s) and source time(s)
hypers : boolean
if true initialise object for hyper parameter optimization
"""
def __init__(self, sc, project_dir, sources, events, hypers=False):
super(SeismicGeometryComposite, self).__init__(
sc, events, project_dir, hypers=hypers)
self._mode = 'geometry'
self.synthesizers = {}
self.choppers = {}
self.sources = sources
self.correction_name = 'time_shift'
self.config = sc
def point2sources(self, point):
"""
Updates the composite source(s) (in place) with the point values.
Parameters
----------
point : dict
with random variables from solution space
"""
tpoint = copy.deepcopy(point)
tpoint = utility.adjust_point_units(tpoint)
# remove hyperparameters from point
hps = self.config.get_hypernames()
for hyper in hps:
if hyper in tpoint:
tpoint.pop(hyper)
source = self.sources[0]
source_params = list(source.keys()) + list(source.stf.keys())
for param in list(tpoint.keys()):
if param not in source_params:
tpoint.pop(param)
# update source times
if self.nevents == 1:
tpoint['time'] += self.event.time # single event
else:
for i, event in enumerate(self.events): # multi event
tpoint['time'][i] += event.time
source_points = utility.split_point(tpoint)
for i, source in enumerate(self.sources):
utility.update_source(source, **source_points[i])
def get_formula(
self, input_rvs, fixed_rvs, hyperparams, problem_config):
"""
Get seismic likelihood formula for the model built. Has to be called
within a with model context.
Parameters
----------
input_rvs : list
of :class:`pymc3.distribution.Distribution` of source parameters
fixed_rvs : dict
of :class:`numpy.array`
hyperparams : dict
of :class:`pymc3.distribution.Distribution`
problem_config : :class:`config.ProblemConfig`
Returns
-------
posterior_llk : :class:`theano.tensor.Tensor`
"""
        chop_bounds = ['b', 'c']  # we want llk calculation only between b and c
hp_specific = self.config.dataset_specific_residual_noise_estimation
tpoint = problem_config.get_test_point()
self.input_rvs = input_rvs
self.fixed_rvs = fixed_rvs
logger.info(
'Seismic optimization on: \n '
' %s' % ', '.join(self.input_rvs.keys()))
self.input_rvs.update(fixed_rvs)
t2 = time()
wlogpts = []
self.init_hierarchicals(problem_config)
self.analyse_noise(tpoint, chop_bounds=chop_bounds)
self.init_weights()
if self.config.station_corrections:
logger.info(
'Initialized %i hierarchical parameters for '
'station corrections.' % len(self.get_all_station_names()))
for wmap in self.wavemaps:
if len(self.hierarchicals) > 0:
time_shifts = self.hierarchicals[
wmap.time_shifts_id][wmap.station_correction_idxs]
self.input_rvs[self.correction_name] = time_shifts
wc = wmap.config
logger.info(
'Preparing data of "%s" for optimization' % wmap._mapid)
wmap.prepare_data(
source=self.events[wc.event_idx],
engine=self.engine,
outmode='array',
chop_bounds=chop_bounds)
logger.info(
'Initializing synthesizer for "%s"' % wmap._mapid)
if self.nevents == 1:
logger.info('Using all sources for wavemap %s !' % wmap._mapid)
sources = self.sources
else:
logger.info(
'Using source based on event %i for wavemap %s!' % (
wc.event_idx, wmap._mapid))
sources = [self.sources[wc.event_idx]]
self.synthesizers[wmap._mapid] = theanof.SeisSynthesizer(
engine=self.engine,
sources=sources,
targets=wmap.targets,
event=self.events[wc.event_idx],
arrival_taper=wc.arrival_taper,
arrival_times=wmap._arrival_times,
wavename=wmap.name,
filterer=wc.filterer,
pre_stack_cut=self.config.pre_stack_cut,
station_corrections=self.config.station_corrections)
synths, _ = self.synthesizers[wmap._mapid](self.input_rvs)
residuals = wmap.shared_data_array - synths
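            # residuals enter a multivariate normal log-likelihood weighted by
            # the Cholesky-inverse data covariances held in wmap.weights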
logpts = multivariate_normal_chol(
wmap.datasets, wmap.weights, hyperparams, residuals,
hp_specific=hp_specific)
wlogpts.append(logpts)
t3 = time()
logger.debug(
'Teleseismic forward model on test model takes: %f' %
(t3 - t2))
llk = Deterministic(self._like_name, tt.concatenate((wlogpts)))
return llk.sum()
def get_synthetics(self, point, **kwargs):
"""
Get synthetics for given point in solution space.
Parameters
----------
point : :func:`pymc3.Point`
Dictionary with model parameters
kwargs especially to change output of seismic forward model
outmode = 'traces'/ 'array' / 'data'
Returns
-------
default: array of synthetics for all targets
"""
outmode = kwargs.pop('outmode', 'stacked_traces')
chop_bounds = kwargs.pop('chop_bounds', ['a', 'd'])
order = kwargs.pop('order', 'list')
nprocs = kwargs.pop('nprocs', 1)
self.point2sources(point)
sc = self.config
synths = []
obs = []
for wmap in self.wavemaps:
wc = wmap.config
wmap.prepare_data(
source=self.events[wc.event_idx],
engine=self.engine,
outmode=outmode,
chop_bounds=chop_bounds)
arrival_times = wmap._arrival_times
if self.config.station_corrections:
try:
arrival_times += point[
wmap.time_shifts_id][wmap.station_correction_idxs]
except KeyError: # got reference point from config
arrival_times += float(point[self.correction_name]) * \
num.ones(wmap.n_t)
if self.nevents == 1:
logger.debug('Using all sources for each wavemap!')
sources = self.sources
else:
logger.debug(
'Using individual sources based on event index '
'for each wavemap!')
sources = [self.sources[wc.event_idx]]
synthetics, _ = heart.seis_synthetics(
engine=self.engine,
sources=sources,
targets=wmap.targets,
arrival_taper=wc.arrival_taper,
wavename=wmap.name,
filterer=wc.filterer,
pre_stack_cut=sc.pre_stack_cut,
arrival_times=arrival_times,
outmode=outmode,
chop_bounds=chop_bounds,
nprocs=nprocs,
# plot=True,
**kwargs)
if self.config.station_corrections:
# set tmin to data tmin
for tr, dtr in zip(synthetics, wmap._prepared_data):
if isinstance(tr, list):
for t in tr:
t.tmin = dtr.tmin
t.tmax = dtr.tmax
else:
tr.tmin = dtr.tmin
tr.tmax = dtr.tmax
if order == 'list':
synths.extend(synthetics)
obs.extend(wmap._prepared_data)
elif order == 'wmap':
synths.append(synthetics)
obs.append(wmap._prepared_data)
else:
raise ValueError('Order "%s" is not supported' % order)
return synths, obs
def update_weights(
self, point, n_jobs=1, plot=False, chop_bounds=['b', 'c']):
"""
Updates weighting matrixes (in place) with respect to the point in the
solution space.
Parameters
----------
point : dict
with numpy array-like items and variable name keys
"""
if not self.weights:
self.init_weights()
sc = self.config
self.point2sources(point)
        # update data covariances in case of model-dependent non-toeplitz noise
if self.config.noise_estimator.structure == 'non-toeplitz':
logger.info('Updating data-covariances ...')
self.analyse_noise(point, chop_bounds=chop_bounds)
crust_inds = range(*sc.gf_config.n_variations)
thresh = 5
if len(crust_inds) > thresh:
logger.info('Updating seismic velocity model-covariances ...')
if self.config.noise_estimator.structure == 'non-toeplitz':
logger.warning(
'Non-toeplitz estimation in combination with model '
'prediction covariances is still EXPERIMENTAL and results'
' should be interpreted with care!!')
for wmap in self.wavemaps:
wc = wmap.config
arrival_times = wmap._arrival_times
if self.config.station_corrections:
arrival_times += point[
wmap.time_shifts_id][wmap.station_correction_idxs]
for channel in wmap.channels:
tidxs = wmap.get_target_idxs([channel])
for station, tidx in zip(wmap.stations, tidxs):
logger.debug('Channel %s of Station %s ' % (
channel, station.station))
crust_targets = heart.init_seismic_targets(
stations=[station],
earth_model_name=sc.gf_config.earth_model_name,
channels=channel,
sample_rate=sc.gf_config.sample_rate,
crust_inds=crust_inds,
reference_location=sc.gf_config.reference_location)
t0 = time()
cov_pv = cov.seismic_cov_velocity_models(
engine=self.engine,
sources=self.sources,
targets=crust_targets,
wavename=wmap.name,
arrival_taper=wc.arrival_taper,
arrival_time=arrival_times[tidx],
filterer=wc.filterer,
chop_bounds=chop_bounds,
plot=plot, n_jobs=n_jobs)
t1 = time()
logger.debug(
'%s: Calculate weight time %f' % (
station.station, (t1 - t0)))
cov_pv = utility.ensure_cov_psd(cov_pv)
self.engine.close_cashed_stores()
dataset = wmap.datasets[tidx]
dataset.covariance.pred_v = cov_pv
else:
logger.info(
'Not updating seismic velocity model-covariances because '
'number of model variations is too low! < %i' % thresh)
for wmap in self.wavemaps:
logger.info('Updating weights of wavemap %s' % wmap._mapid)
for i, dataset in enumerate(wmap.datasets):
choli = dataset.covariance.chol_inverse
# update shared variables
dataset.covariance.update_slog_pdet()
wmap.weights[i].set_value(choli)
class SeismicDistributerComposite(SeismicComposite):
"""
Comprises how to solve the seismic (kinematic) linear forward model.
Distributed slip
"""
def __init__(self, sc, project_dir, events, hypers=False):
super(SeismicDistributerComposite, self).__init__(
sc, events, project_dir, hypers=hypers)
self.gfs = {}
self.gf_names = {}
self.choppers = {}
self.sweep_implementation = 'c'
self._mode = 'ffi'
self.gfpath = os.path.join(
project_dir, self._mode, bconfig.linear_gf_dir_name)
self.config = sc
dgc = sc.gf_config.discretization_config
for pw, pl in zip(dgc.patch_widths, dgc.patch_lengths):
if pw != pl:
raise ValueError(
'So far only square patches supported in kinematic'
' model! - fast_sweeping issues')
if len(sc.gf_config.reference_sources) > 1:
logger.warning(
'So far only rupture propagation on each subfault individually')
self.fault = self.load_fault_geometry()
logger.info('Fault(s) discretized to %s [km]'
' patches.' % utility.list2string(dgc.patch_lengths))
if not hypers:
self.sweepers = []
for idx in range(self.fault.nsubfaults):
n_p_dip, n_p_strike = \
self.fault.ordering.get_subfault_discretization(idx)
self.sweepers.append(theanof.Sweeper(
dgc.patch_lengths[idx],
n_p_dip,
n_p_strike,
self.sweep_implementation))
for wmap in self.wavemaps:
logger.info(
'Preparing data of "%s" for optimization' % wmap._mapid)
wmap.prepare_data(
source=self.events[wmap.config.event_idx],
engine=self.engine,
outmode='array',
chop_bounds=['b', 'c'])
def load_fault_geometry(self):
"""
Load fault-geometry, i.e. discretized patches.
Returns
-------
:class:`heart.FaultGeometry`
"""
try:
return utility.load_objects(
os.path.join(self.gfpath, bconfig.fault_geometry_name))[0]
except Exception:
raise FaultGeometryNotFoundError()
def point2sources(self, point):
"""
        Returns the fault source patch(es) with the point values updated.
Parameters
----------
point : dict
with random variables from solution space
"""
tpoint = copy.deepcopy(point)
if self.nevents == 1:
events = [self.event] # single event
else:
events = self.events # multi event
return self.fault.point2sources(tpoint, events=events)
def get_gflibrary_key(self, crust_ind, wavename, component):
return '%i_%s_%s' % (crust_ind, wavename, component)
def load_gfs(self, crust_inds=None, make_shared=True):
"""
        Load Green's function matrices for each variable to be inverted for.
Updates gfs and gf_names attributes.
Parameters
----------
crust_inds : list
of int to indexes of Green's Functions
make_shared : bool
if True transforms gfs to :class:`theano.shared` variables
"""
        if crust_inds is None:
            crust_inds = list(range(*self.config.gf_config.n_variations))
        if not isinstance(crust_inds, list):
            raise TypeError('crust_inds need to be a list!')
for wmap in self.wavemaps:
for crust_ind in crust_inds:
gfs = {}
for var in self.slip_varnames:
gflib_name = get_gf_prefix(
datatype=self.name, component=var,
wavename=wmap._mapid, crust_ind=crust_ind)
gfpath = os.path.join(
self.gfpath, gflib_name + '.yaml')
if not os.path.exists(gfpath):
filename = get_gf_prefix(
datatype=self.name, component=var,
wavename=wmap.config.name, crust_ind=crust_ind)
logger.warning(
'Seismic GFLibrary %s does not exist, '
'trying to load with old naming: %s' % (
gflib_name, filename))
gfpath = os.path.join(
self.gfpath, filename + '.yaml')
else:
logger.info(
'Loading SeismicGFLibrary %s ' % gflib_name)
filename = gflib_name
gfs = load_gf_library(
directory=self.gfpath, filename=filename)
if make_shared:
gfs.init_optimization()
key = self.get_gflibrary_key(
crust_ind=crust_ind,
wavename=wmap._mapid,
component=var)
self.gf_names[key] = gfpath
self.gfs[key] = gfs
def get_formula(self, input_rvs, fixed_rvs, hyperparams, problem_config):
        # no a, d taper bounds, as the GF library is saved between b and c
        chop_bounds = ['b', 'c']
logger.info("Loading %s Green's Functions" % self.name)
self.load_gfs(
crust_inds=[self.config.gf_config.reference_model_idx],
make_shared=False)
hp_specific = self.config.dataset_specific_residual_noise_estimation
tpoint = problem_config.get_test_point()
self.input_rvs = input_rvs
self.fixed_rvs = fixed_rvs
logger.info(
'Seismic optimization on: \n '
' %s' % ', '.join(self.input_rvs.keys()))
t2 = time()
wlogpts = []
self.analyse_noise(tpoint, chop_bounds=chop_bounds)
for gfs in self.gfs.values():
gfs.init_optimization()
self.init_weights()
self.init_hierarchicals(problem_config)
if self.config.station_corrections:
logger.info(
'Initialized %i hierarchical parameters for '
'station corrections.' % len(self.get_all_station_names()))
self.input_rvs.update(fixed_rvs)
ref_idx = self.config.gf_config.reference_model_idx
nuc_strike = input_rvs['nucleation_strike']
nuc_dip = input_rvs['nucleation_dip']
t2 = time()
# convert velocities to rupture onset
logger.debug('Fast sweeping ...')
starttimes0 = tt.zeros((self.fault.npatches), dtype=tconfig.floatX)
for index in range(self.fault.nsubfaults):
nuc_dip_idx, nuc_strike_idx = self.fault.fault_locations2idxs(
index=index,
positions_dip=nuc_dip[index],
positions_strike=nuc_strike[index],
backend='theano')
sf_patch_indexs = self.fault.cum_subfault_npatches[index:index + 2]
starttimes_tmp = self.sweepers[index](
(1. / self.fault.vector2subfault(
index, input_rvs['velocities'])),
nuc_dip_idx, nuc_strike_idx)
starttimes_tmp += input_rvs['time'][index]
starttimes0 = tt.set_subtensor(
starttimes0[sf_patch_indexs[0]:sf_patch_indexs[1]],
starttimes_tmp)
wlogpts = []
for wmap in self.wavemaps:
wc = wmap.config
# station corrections
if len(self.hierarchicals) > 0:
logger.info('Applying station corrections ...')
starttimes = (
tt.tile(starttimes0, wmap.n_t) -
tt.repeat(self.hierarchicals[wmap.time_shifts_id][
wmap.station_correction_idxs],
self.fault.npatches)).reshape(
(wmap.n_t, self.fault.npatches))
else:
logger.info('No station corrections ...')
starttimes = tt.tile(starttimes0, wmap.n_t).reshape(
(wmap.n_t, self.fault.npatches))
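            # starttimes has shape (n_t, npatches): one row of patch rupture-onset
            # times per target trace, shifted by station corrections if available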
targetidxs = shared(
num.atleast_2d(num.arange(wmap.n_t)).T, borrow=True)
logger.debug('Stacking %s phase ...' % wc.name)
synthetics = tt.zeros(
(wmap.n_t, wc.arrival_taper.nsamples(
self.config.gf_config.sample_rate)),
dtype=tconfig.floatX)
            # make sure data is initialized as array; for non-toeplitz noise the
            # traces were already used in analyse_noise above
wmap.prepare_data(
source=self.events[wc.event_idx],
engine=self.engine,
outmode='array',
chop_bounds=chop_bounds)
for var in self.slip_varnames:
logger.debug('Stacking %s variable' % var)
key = self.get_gflibrary_key(
crust_ind=ref_idx, wavename=wmap._mapid, component=var)
logger.debug('GF Library key %s' % key)
synthetics += self.gfs[key].stack_all(
targetidxs=targetidxs,
starttimes=starttimes,
durations=input_rvs['durations'],
slips=input_rvs[var],
interpolation=wc.interpolation)
residuals = wmap.shared_data_array - synthetics
logger.debug('Calculating likelihoods ...')
logpts = multivariate_normal_chol(
wmap.datasets, wmap.weights, hyperparams, residuals,
hp_specific=hp_specific)
wlogpts.append(logpts)
t3 = time()
logger.debug(
'Seismic formula on test model takes: %f' % (t3 - t2))
llk = Deterministic(self._like_name, tt.concatenate((wlogpts)))
return llk.sum()
def get_synthetics(self, point, **kwargs):
"""
Get synthetics for given point in solution space.
Parameters
----------
point : :func:`pymc3.Point`
Dictionary with model parameters
kwargs especially to change output of the forward model
outmode: stacked_traces/ tapered_data/ array
Returns
-------
list with :class:`heart.SeismicDataset` synthetics for each target
"""
outmode = kwargs.pop('outmode', 'stacked_traces')
patchidxs = kwargs.pop('patchidxs', None)
if patchidxs is None:
patchidxs = num.arange(self.fault.npatches, dtype='int')
# GF library cut in between [b, c] no [a,d] possible
chop_bounds = ['b', 'c']
order = kwargs.pop('order', 'list')
ref_idx = self.config.gf_config.reference_model_idx
if len(self.gfs) == 0:
self.load_gfs(
crust_inds=[ref_idx],
make_shared=False)
for gfs in self.gfs.values():
gfs.set_stack_mode('numpy')
tpoint = copy.deepcopy(point)
hps = self.config.get_hypernames()
for hyper in hps:
if hyper in tpoint:
tpoint.pop(hyper)
starttimes0 = num.zeros((self.fault.npatches), dtype=tconfig.floatX)
for index in range(self.fault.nsubfaults):
starttimes_tmp = self.fault.point2starttimes(
tpoint, index=index).ravel()
sf_patch_indexs = self.fault.cum_subfault_npatches[index:index + 2]
starttimes0[sf_patch_indexs[0]:sf_patch_indexs[1]] = starttimes_tmp
synth_traces = []
obs_traces = []
for wmap in self.wavemaps:
wc = wmap.config
starttimes = num.tile(
starttimes0, wmap.n_t).reshape(
wmap.n_t, self.fault.npatches)
# station corrections
if self.config.station_corrections:
logger.debug(
'Applying station corrections '
'for wmap {}'.format(wmap._mapid))
try:
corrections = point[wmap.time_shifts_id]
except KeyError: # got reference point from config
corrections = float(point[self.correction_name]) * \
num.ones(wmap.n_t)
starttimes -= num.repeat(
corrections[wmap.station_correction_idxs],
self.fault.npatches).reshape(
wmap.n_t, self.fault.npatches)
# TODO check targetidxs if station blacklisted!?
targetidxs = num.atleast_2d(num.arange(wmap.n_t)).T
synthetics = num.zeros(
(wmap.n_t, wc.arrival_taper.nsamples(
self.config.gf_config.sample_rate)))
for var in self.slip_varnames:
key = self.get_gflibrary_key(
crust_ind=ref_idx, wavename=wmap._mapid, component=var)
try:
logger.debug('Accessing GF Library key %s' % key)
gflibrary = self.gfs[key]
except KeyError:
raise KeyError(
'GF library %s not loaded! Loaded GFs:'
' %s' % (key, utility.list2string(self.gfs.keys())))
                gflibrary.set_stack_mode('numpy')
                t0 = time()
synthetics += gflibrary.stack_all(
targetidxs=targetidxs,
starttimes=starttimes[:, patchidxs],
durations=tpoint['durations'][patchidxs],
slips=tpoint[var][patchidxs],
patchidxs=patchidxs,
interpolation=wc.interpolation)
                t1 = time()
logger.debug(
'{} seconds to stack {}'.format((t1 - t0), wmap._mapid))
wmap_synthetics = []
if outmode != 'array':
for i, target in enumerate(wmap.targets):
tr = Trace(
ydata=synthetics[i, :],
tmin=float(
gflibrary.reference_times[i]),
deltat=gflibrary.deltat)
tr.set_codes(*target.codes)
if outmode == 'tapered_data':
# TODO subfault individual synthetics (use patchidxs arg)
tr = [tr]
wmap_synthetics.append(tr)
elif outmode == 'array':
wmap_synthetics.extend(synthetics)
else:
raise ValueError(
'Supported outmodes: stacked_traces, tapered_data, array! '
'Given outmode: %s !' % outmode)
wmap.prepare_data(
source=self.events[wc.event_idx],
engine=self.engine,
outmode=outmode,
chop_bounds=chop_bounds)
if order == 'list':
synth_traces.extend(wmap_synthetics)
obs_traces.extend(wmap._prepared_data)
elif order == 'wmap':
synth_traces.append(wmap_synthetics)
obs_traces.append(wmap._prepared_data)
else:
raise ValueError('Order "%s" is not supported' % order)
return synth_traces, obs_traces
def update_weights(
self, point, n_jobs=1, plot=False, chop_bounds=['b', 'c']):
"""
Updates weighting matrixes (in place) with respect to the point in the
solution space.
Parameters
----------
point : dict
with numpy array-like items and variable name keys
"""
if not self.weights:
self.init_weights()
        # update data covariances in case of model-dependent non-toeplitz noise
if self.config.noise_estimator.structure == 'non-toeplitz':
logger.info('Updating data-covariances ...')
self.analyse_noise(point, chop_bounds=chop_bounds)
for wmap in self.wavemaps:
logger.info('Updating weights of wavemap %s' % wmap._mapid)
for i, dataset in enumerate(wmap.datasets):
choli = dataset.covariance.chol_inverse
# update shared variables
dataset.covariance.update_slog_pdet()
wmap.weights[i].set_value(choli)
|
gpl-3.0
|
yavalvas/yav_com
|
build/matplotlib/doc/mpl_examples/pylab_examples/finance_demo.py
|
3
|
1106
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter, WeekdayLocator,\
DayLocator, MONDAY
from matplotlib.finance import quotes_historical_yahoo_ohlc, candlestick_ohlc
# (Year, month, day) tuples suffice as args for quotes_historical_yahoo_ohlc
date1 = (2004, 2, 1)
date2 = (2004, 4, 12)
mondays = WeekdayLocator(MONDAY) # major ticks on the mondays
alldays = DayLocator() # minor ticks on the days
weekFormatter = DateFormatter('%b %d') # e.g., Jan 12
dayFormatter = DateFormatter('%d') # e.g., 12
quotes = quotes_historical_yahoo_ohlc('INTC', date1, date2)
if len(quotes) == 0:
raise SystemExit
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2)
ax.xaxis.set_major_locator(mondays)
ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_formatter(weekFormatter)
#ax.xaxis.set_minor_formatter(dayFormatter)
#plot_day_summary(ax, quotes, ticksize=3)
candlestick_ohlc(ax, quotes, width=0.6)
ax.xaxis_date()
ax.autoscale_view()
plt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')
plt.show()
|
mit
|
AtsushiSakai/PyAdvancedControl
|
steer_vehicle_model/kinematic_bicycle_model.py
|
1
|
1771
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Kinematic Bicycle Model
author Atsushi Sakai
"""
import math
dt = 0.1 # [s]
L = 2.9 # [m]
Lr = 1.4 # [m]
class State:
def __init__(self, x=Lr, y=0.0, yaw=0.0, v=0.0, beta=0.0):
self.x = x
self.y = y
self.yaw = yaw
self.v = v
self.beta = beta
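# The update below is a forward-Euler discretization (time step dt) of the
# kinematic bicycle model, where L is the wheelbase and Lr the distance from
# the rear axle to the reference point (restating the code for clarity):
#   beta     = atan((Lr / L) * tan(delta))   # slip angle at the reference point
#   dx/dt    = v * cos(yaw + beta)
#   dy/dt    = v * sin(yaw + beta)
#   dyaw/dt  = (v / Lr) * sin(beta)
#   dv/dt    = a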
def update(state, a, delta):
state.beta = math.atan2(Lr / L * math.tan(delta), 1.0)
state.x = state.x + state.v * math.cos(state.yaw + state.beta) * dt
state.y = state.y + state.v * math.sin(state.yaw + state.beta) * dt
state.yaw = state.yaw + state.v / Lr * math.sin(state.beta) * dt
state.v = state.v + a * dt
# print(state.x, state.y, state.yaw, state.v)
return state
if __name__ == '__main__':
print("start Kinematic Bicycle model simulation")
import matplotlib.pyplot as plt
import numpy as np
T = 100
a = [1.0] * T
delta = [math.radians(1.0)] * T
# print(a, delta)
state = State()
x = []
y = []
yaw = []
v = []
beta = []
    time = []
t = 0.0
for (ai, di) in zip(a, delta):
t = t + dt
state = update(state, ai, di)
x.append(state.x)
y.append(state.y)
yaw.append(state.yaw)
v.append(state.v)
beta.append(state.beta)
time.append(t)
flg, ax = plt.subplots(1)
plt.plot(x, y)
plt.xlabel("x[m]")
plt.ylabel("y[m]")
plt.axis("equal")
plt.grid(True)
flg, ax = plt.subplots(1)
plt.plot(time, np.array(v) * 3.6)
    plt.xlabel("Time[s]")
    plt.ylabel("velocity[km/h]")
plt.grid(True)
# flg, ax = plt.subplots(1)
# plt.plot([math.degrees(ibeta) for ibeta in beta])
# plt.grid(True)
plt.show()
|
mit
|
hainm/statsmodels
|
statsmodels/duration/tests/test_phreg.py
|
10
|
11984
|
import os
import numpy as np
from statsmodels.duration.hazard_regression import PHReg
from numpy.testing import (assert_allclose,
assert_equal)
import pandas as pd
# TODO: Include some corner cases: data sets with empty strata, strata
# with no events, entry times after censoring times, etc.
# All the R results
from . import survival_r_results
from . import survival_enet_r_results
"""
Tests of PHReg against R coxph.
Tests include entry times and stratification.
phreg_gentests.py generates the test data sets and puts them into the
results folder.
survival.R runs R on all the test data sets and constructs the
survival_r_results module.
"""
# Arguments passed to the PHReg fit method.
args = {"method": "bfgs", "disp": 0}
def get_results(n, p, ext, ties):
if ext is None:
coef_name = "coef_%d_%d_%s" % (n, p, ties)
se_name = "se_%d_%d_%s" % (n, p, ties)
time_name = "time_%d_%d_%s" % (n, p, ties)
hazard_name = "hazard_%d_%d_%s" % (n, p, ties)
else:
coef_name = "coef_%d_%d_%s_%s" % (n, p, ext, ties)
se_name = "se_%d_%d_%s_%s" % (n, p, ext, ties)
time_name = "time_%d_%d_%s_%s" % (n, p, ext, ties)
hazard_name = "hazard_%d_%d_%s_%s" % (n, p, ext, ties)
coef = getattr(survival_r_results, coef_name)
se = getattr(survival_r_results, se_name)
time = getattr(survival_r_results, time_name)
hazard = getattr(survival_r_results, hazard_name)
return coef, se, time, hazard
class TestPHReg(object):
# Load a data file from the results directory
def load_file(self, fname):
cur_dir = os.path.dirname(os.path.abspath(__file__))
data = np.genfromtxt(os.path.join(cur_dir, 'results', fname),
delimiter=" ")
time = data[:,0]
status = data[:,1]
entry = data[:,2]
exog = data[:,3:]
return time, status, entry, exog
# Run a single test against R output
def do1(self, fname, ties, entry_f, strata_f):
# Read the test data.
time, status, entry, exog = self.load_file(fname)
n = len(time)
vs = fname.split("_")
n = int(vs[2])
p = int(vs[3].split(".")[0])
ties1 = ties[0:3]
# Needs to match the kronecker statement in survival.R
strata = np.kron(range(5), np.ones(n // 5))
# No stratification or entry times
mod = PHReg(time, exog, status, ties=ties)
phrb = mod.fit(**args)
coef_r, se_r, time_r, hazard_r = get_results(n, p, None, ties1)
assert_allclose(phrb.params, coef_r, rtol=1e-3)
assert_allclose(phrb.bse, se_r, rtol=1e-4)
#time_h, cumhaz, surv = phrb.baseline_hazard[0]
# Entry times but no stratification
phrb = PHReg(time, exog, status, entry=entry,
ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "et", ties1)
assert_allclose(phrb.params, coef, rtol=1e-3)
assert_allclose(phrb.bse, se, rtol=1e-3)
# Stratification but no entry times
phrb = PHReg(time, exog, status, strata=strata,
ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "st", ties1)
assert_allclose(phrb.params, coef, rtol=1e-4)
assert_allclose(phrb.bse, se, rtol=1e-4)
# Stratification and entry times
phrb = PHReg(time, exog, status, entry=entry,
strata=strata, ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "et_st", ties1)
assert_allclose(phrb.params, coef, rtol=1e-3)
assert_allclose(phrb.bse, se, rtol=1e-4)
# Run all the tests
def test_r(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
rdir = os.path.join(cur_dir, 'results')
fnames = os.listdir(rdir)
fnames = [x for x in fnames if x.startswith("survival")
and x.endswith(".csv")]
for fname in fnames:
for ties in "breslow","efron":
for entry_f in False,True:
for strata_f in False,True:
yield (self.do1, fname, ties, entry_f,
strata_f)
def test_missing(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
time[0:5] = np.nan
status[5:10] = np.nan
exog[10:15,:] = np.nan
md = PHReg(time, exog, status, missing='drop')
assert_allclose(len(md.endog), 185)
assert_allclose(len(md.status), 185)
assert_allclose(md.exog.shape, np.r_[185,4])
def test_formula(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
entry = np.zeros_like(time)
entry[0:10] = time[0:10] / 2
df = pd.DataFrame({"time": time, "status": status,
"exog1": exog[:, 0], "exog2": exog[:, 1],
"exog3": exog[:, 2], "exog4": exog[:, 3],
"entry": entry})
mod1 = PHReg(time, exog, status, entry=entry)
rslt1 = mod1.fit()
fml = "time ~ 0 + exog1 + exog2 + exog3 + exog4"
mod2 = PHReg.from_formula(fml, df, status=status,
entry=entry)
rslt2 = mod2.fit()
mod3 = PHReg.from_formula(fml, df, status="status",
entry="entry")
rslt3 = mod3.fit()
assert_allclose(rslt1.params, rslt2.params)
assert_allclose(rslt1.params, rslt3.params)
assert_allclose(rslt1.bse, rslt2.bse)
assert_allclose(rslt1.bse, rslt3.bse)
def test_predict_formula(self):
n = 100
np.random.seed(34234)
time = 50 * np.random.uniform(size=n)
status = np.random.randint(0, 2, n).astype(np.float64)
exog = np.random.uniform(1, 2, size=(n, 2))
df = pd.DataFrame({"time": time, "status": status,
"exog1": exog[:, 0], "exog2": exog[:, 1]})
fml = "time ~ 0 + exog1 + np.log(exog2) + exog1*exog2"
model1 = PHReg.from_formula(fml, df, status=status)
result1 = model1.fit()
from patsy import dmatrix
dfp = dmatrix(model1.data.design_info.builder, df)
pr1 = result1.predict()
pr2 = result1.predict(exog=df)
pr3 = model1.predict(result1.params, exog=dfp) # No standard errors
pr4 = model1.predict(result1.params, cov_params=result1.cov_params(), exog=dfp)
prl = (pr1, pr2, pr3, pr4)
for i in range(4):
for j in range(i):
assert_allclose(prl[i].predicted_values, prl[j].predicted_values)
prl = (pr1, pr2, pr4)
for i in range(3):
for j in range(i):
assert_allclose(prl[i].standard_errors, prl[j].standard_errors)
def test_offset(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod1 = PHReg(time, exog, status)
rslt1 = mod1.fit()
offset = exog[:,0] * rslt1.params[0]
exog = exog[:, 1:]
mod2 = PHReg(time, exog, status, offset=offset)
rslt2 = mod2.fit()
assert_allclose(rslt2.params, rslt1.params[1:])
def test_post_estimation(self):
# All regression tests
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod = PHReg(time, exog, status)
rslt = mod.fit()
mart_resid = rslt.martingale_residuals
assert_allclose(np.abs(mart_resid).sum(), 120.72475743348433)
w_avg = rslt.weighted_covariate_averages
assert_allclose(np.abs(w_avg[0]).sum(0),
np.r_[7.31008415, 9.77608674,10.89515885, 13.1106801])
bc_haz = rslt.baseline_cumulative_hazard
v = [np.mean(np.abs(x)) for x in bc_haz[0]]
w = np.r_[23.482841556421608, 0.44149255358417017,
0.68660114081275281]
assert_allclose(v, w)
score_resid = rslt.score_residuals
v = np.r_[ 0.50924792, 0.4533952, 0.4876718, 0.5441128]
w = np.abs(score_resid).mean(0)
assert_allclose(v, w)
groups = np.random.randint(0, 3, 200)
mod = PHReg(time, exog, status)
rslt = mod.fit(groups=groups)
robust_cov = rslt.cov_params()
v = [0.00513432, 0.01278423, 0.00810427, 0.00293147]
w = np.abs(robust_cov).mean(0)
assert_allclose(v, w, rtol=1e-6)
s_resid = rslt.schoenfeld_residuals
ii = np.flatnonzero(np.isfinite(s_resid).all(1))
s_resid = s_resid[ii, :]
v = np.r_[0.85154336, 0.72993748, 0.73758071, 0.78599333]
assert_allclose(np.abs(s_resid).mean(0), v)
def test_summary(self):
# smoke test
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod = PHReg(time, exog, status)
rslt = mod.fit()
rslt.summary()
def test_predict(self):
# All smoke tests. We should be able to convert the lhr and hr
# tests into real tests against R. There are many options to
# this function that may interact in complicated ways. Only a
# few key combinations are tested here.
np.random.seed(34234)
endog = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod = PHReg(endog, exog, status)
rslt = mod.fit()
rslt.predict()
for pred_type in 'lhr', 'hr', 'cumhaz', 'surv':
rslt.predict(pred_type=pred_type)
rslt.predict(endog=endog[0:10], pred_type=pred_type)
rslt.predict(endog=endog[0:10], exog=exog[0:10,:],
pred_type=pred_type)
def test_get_distribution(self):
# Smoke test
np.random.seed(34234)
exog = np.random.normal(size=(200, 2))
lin_pred = exog.sum(1)
elin_pred = np.exp(-lin_pred)
time = -elin_pred * np.log(np.random.uniform(size=200))
mod = PHReg(time, exog)
rslt = mod.fit()
dist = rslt.get_distribution()
fitted_means = dist.mean()
true_means = elin_pred
fitted_var = dist.var()
fitted_sd = dist.std()
sample = dist.rvs()
def test_fit_regularized(self):
# Data set sizes
for n,p in (50,2),(100,5):
# Penalty weights
for js,s in enumerate([0,0.1]):
coef_name = "coef_%d_%d_%d" % (n, p, js)
coef = getattr(survival_enet_r_results, coef_name)
fname = "survival_data_%d_%d.csv" % (n, p)
time, status, entry, exog = self.load_file(fname)
exog -= exog.mean(0)
exog /= exog.std(0, ddof=1)
mod = PHReg(time, exog, status=status, ties='breslow')
rslt = mod.fit_regularized(alpha=s)
                # The agreement isn't very high; the issue may be on
# their side. They seem to use some approximations
# that we are not using.
assert_allclose(rslt.params, coef, rtol=0.3)
# Smoke test for summary
smry = rslt.summary()
if __name__ == "__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
|
bsd-3-clause
|
nelsonag/openmc
|
tests/regression_tests/surface_tally/test.py
|
9
|
7315
|
import numpy as np
import openmc
import pandas as pd
from tests.testing_harness import PyAPITestHarness
class SurfaceTallyTestHarness(PyAPITestHarness):
def _build_inputs(self):
# Instantiate some Materials and register the appropriate Nuclides
uo2 = openmc.Material(name='UO2 fuel at 2.4% wt enrichment')
uo2.set_density('g/cc', 10.0)
uo2.add_nuclide('U238', 1.0)
uo2.add_nuclide('U235', 0.02)
uo2.add_nuclide('O16', 2.0)
borated_water = openmc.Material(name='Borated water')
borated_water.set_density('g/cm3', 1)
borated_water.add_nuclide('B10', 10e-5)
borated_water.add_nuclide('H1', 2.0)
borated_water.add_nuclide('O16', 1.0)
# Instantiate a Materials collection and export to XML
materials_file = openmc.Materials([uo2, borated_water])
materials_file.export_to_xml()
# Instantiate ZCylinder surfaces
fuel_or = openmc.ZCylinder(surface_id=1, x0=0, y0=0, r=1,
name='Fuel OR')
left = openmc.XPlane(surface_id=2, x0=-2, name='left')
right = openmc.XPlane(surface_id=3, x0=2, name='right')
bottom = openmc.YPlane(y0=-2, name='bottom')
top = openmc.YPlane(y0=2, name='top')
left.boundary_type = 'vacuum'
right.boundary_type = 'reflective'
top.boundary_type = 'reflective'
bottom.boundary_type = 'reflective'
# Instantiate Cells
fuel = openmc.Cell(name='fuel')
water = openmc.Cell(name='water')
# Use surface half-spaces to define regions
fuel.region = -fuel_or
water.region = +fuel_or & -right & +bottom & -top
# Register Materials with Cells
fuel.fill = uo2
water.fill = borated_water
# Instantiate pin cell Universe
pin_cell = openmc.Universe(name='pin cell')
pin_cell.add_cells([fuel, water])
# Instantiate root Cell and Universe
root_cell = openmc.Cell(name='root cell')
root_cell.region = +left & -right & +bottom & -top
root_cell.fill = pin_cell
root_univ = openmc.Universe(universe_id=0, name='root universe')
root_univ.add_cell(root_cell)
# Instantiate a Geometry, register the root Universe
geometry = openmc.Geometry(root_univ)
geometry.export_to_xml()
# Instantiate a Settings object, set all runtime parameters
settings_file = openmc.Settings()
settings_file.batches = 10
settings_file.inactive = 0
settings_file.particles = 1000
#settings_file.output = {'tallies': True}
# Create an initial uniform spatial source distribution
bounds = [-0.62992, -0.62992, -1, 0.62992, 0.62992, 1]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:],\
only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)
settings_file.export_to_xml()
# Tallies file
tallies_file = openmc.Tallies()
# Create partial current tallies from fuel to water
# Filters
two_groups = [0., 4e6, 20e6]
energy_filter = openmc.EnergyFilter(two_groups)
polar_filter = openmc.PolarFilter([0, np.pi / 4, np.pi])
azimuthal_filter = openmc.AzimuthalFilter([0, np.pi / 4, np.pi])
surface_filter = openmc.SurfaceFilter([1])
cell_from_filter = openmc.CellFromFilter(fuel)
cell_filter = openmc.CellFilter(water)
# Use Cell to cell filters for partial current
cell_to_cell_tally = openmc.Tally(name=str('fuel_to_water_1'))
cell_to_cell_tally.filters = [cell_from_filter, cell_filter, \
energy_filter, polar_filter, azimuthal_filter]
cell_to_cell_tally.scores = ['current']
tallies_file.append(cell_to_cell_tally)
# Use a Cell from + surface filters for partial current
cell_to_cell_tally = openmc.Tally(name=str('fuel_to_water_2'))
cell_to_cell_tally.filters = [cell_from_filter, surface_filter, \
energy_filter, polar_filter, azimuthal_filter]
cell_to_cell_tally.scores = ['current']
tallies_file.append(cell_to_cell_tally)
# Create partial current tallies from water to fuel
# Filters
cell_from_filter = openmc.CellFromFilter(water)
cell_filter = openmc.CellFilter(fuel)
# Cell to cell filters for partial current
cell_to_cell_tally = openmc.Tally(name=str('water_to_fuel_1'))
cell_to_cell_tally.filters = [cell_from_filter, cell_filter, \
energy_filter, polar_filter, azimuthal_filter]
cell_to_cell_tally.scores = ['current']
tallies_file.append(cell_to_cell_tally)
# Cell from + surface filters for partial current
cell_to_cell_tally = openmc.Tally(name=str('water_to_fuel_2'))
cell_to_cell_tally.filters = [cell_from_filter, surface_filter, \
energy_filter, polar_filter, azimuthal_filter]
cell_to_cell_tally.scores = ['current']
tallies_file.append(cell_to_cell_tally)
# Create a net current tally on inner surface using a surface filter
surface_filter = openmc.SurfaceFilter([1])
surf_tally1 = openmc.Tally(name='net_cylinder')
surf_tally1.filters = [surface_filter, energy_filter, polar_filter, \
azimuthal_filter]
surf_tally1.scores = ['current']
tallies_file.append(surf_tally1)
# Create a net current tally on left surface using a surface filter
# This surface has a vacuum boundary condition, so leakage is tallied
surface_filter = openmc.SurfaceFilter([2])
surf_tally2 = openmc.Tally(name='leakage_left')
surf_tally2.filters = [surface_filter, energy_filter, polar_filter, \
azimuthal_filter]
surf_tally2.scores = ['current']
tallies_file.append(surf_tally2)
# Create a net current tally on right surface using a surface filter
# This surface has a reflective boundary condition, so the net current
# should be zero.
surface_filter = openmc.SurfaceFilter([3])
surf_tally3 = openmc.Tally(name='net_right')
surf_tally3.filters = [surface_filter, energy_filter]
surf_tally3.scores = ['current']
tallies_file.append(surf_tally3)
tallies_file.export_to_xml()
def _get_results(self):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Extract the tally data as a Pandas DataFrame.
df = pd.DataFrame()
for t in sp.tallies.values():
df = df.append(t.get_pandas_dataframe(), ignore_index=True)
# Extract the relevant data as a CSV string.
cols = ('mean', 'std. dev.')
return df.to_csv(None, columns=cols, index=False, float_format='%.7e')
def test_surface_tally():
harness = SurfaceTallyTestHarness('statepoint.10.h5')
harness.main()
|
mit
|
LiaoPan/blaze
|
blaze/compute/numpy.py
|
2
|
11106
|
from __future__ import absolute_import, division, print_function
import datetime
import numpy as np
from pandas import DataFrame, Series
from datashape import to_numpy, to_numpy_dtype
from numbers import Number
from ..expr import (
Reduction, Field, Projection, Broadcast, Selection, ndim,
Distinct, Sort, Tail, Head, Label, ReLabel, Expr, Slice, Join,
std, var, count, nunique, Summary, IsIn,
BinOp, UnaryOp, USub, Not, nelements, Repeat, Concat, Interp,
UTCFromTimestamp, DateTimeTruncate,
Transpose, TensorDot, Coerce,
)
from ..utils import keywords
from .core import base, compute
from ..dispatch import dispatch
from odo import into
import pandas as pd
__all__ = ['np']
@dispatch(Field, np.ndarray)
def compute_up(c, x, **kwargs):
if x.dtype.names and c._name in x.dtype.names:
return x[c._name]
if not x.dtype.names and x.shape[1] == len(c._child.fields):
return x[:, c._child.fields.index(c._name)]
raise NotImplementedError() # pragma: no cover
@dispatch(Projection, np.ndarray)
def compute_up(t, x, **kwargs):
if x.dtype.names and all(col in x.dtype.names for col in t.fields):
return x[t.fields]
if not x.dtype.names and x.shape[1] == len(t._child.fields):
return x[:, [t._child.fields.index(col) for col in t.fields]]
raise NotImplementedError() # pragma: no cover
try:
from .numba import broadcast_numba as broadcast_ndarray
except ImportError:
def broadcast_ndarray(t, *data, **kwargs):
del kwargs['scope']
d = dict(zip(t._scalar_expr._leaves(), data))
return compute(t._scalar_expr, d, **kwargs)
compute_up.register(Broadcast, np.ndarray)(broadcast_ndarray)
for i in range(2, 6):
compute_up.register(Broadcast, *([(np.ndarray, Number)] * i))(broadcast_ndarray)
@dispatch(Repeat, np.ndarray)
def compute_up(t, data, _char_mul=np.char.multiply, **kwargs):
if isinstance(t.lhs, Expr):
return _char_mul(data, t.rhs)
else:
return _char_mul(t.lhs, data)
@compute_up.register(Repeat, np.ndarray, (np.ndarray, base))
@compute_up.register(Repeat, base, np.ndarray)
def compute_up_np_repeat(t, lhs, rhs, _char_mul=np.char.multiply, **kwargs):
return _char_mul(lhs, rhs)
def _interp(arr, v, _Series=pd.Series, _charmod=np.char.mod):
"""
Delegate to the most efficient string formatting technique based on
the length of the array.
"""
if len(arr) >= 145:
return _Series(arr) % v
return _charmod(arr, v)
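# Illustrative behaviour of _interp (inputs assumed for the example): a short
# string array goes through np.char.mod, e.g. _interp(np.array(['%d a', '%d b']), 3)
# gives array(['3 a', '3 b']); arrays with 145 or more elements are routed
# through the pandas Series '%' operator instead, which is faster at that size.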
@dispatch(Interp, np.ndarray)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return _interp(data, t.rhs)
else:
return _interp(t.lhs, data)
@compute_up.register(Interp, np.ndarray, (np.ndarray, base))
@compute_up.register(Interp, base, np.ndarray)
def compute_up_np_interp(t, lhs, rhs, **kwargs):
return _interp(lhs, rhs)
@dispatch(BinOp, np.ndarray, (np.ndarray, base))
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(BinOp, np.ndarray)
def compute_up(t, data, **kwargs):
if isinstance(t.lhs, Expr):
return t.op(data, t.rhs)
else:
return t.op(t.lhs, data)
@dispatch(BinOp, base, np.ndarray)
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(UnaryOp, np.ndarray)
def compute_up(t, x, **kwargs):
return getattr(np, t.symbol)(x)
@dispatch(Not, np.ndarray)
def compute_up(t, x, **kwargs):
return ~x
@dispatch(USub, np.ndarray)
def compute_up(t, x, **kwargs):
return -x
inat = np.datetime64('NaT').view('int64')
@dispatch(count, np.ndarray)
def compute_up(t, x, **kwargs):
result_dtype = to_numpy_dtype(t.dshape)
if issubclass(x.dtype.type, (np.floating, np.object_)):
return pd.notnull(x).sum(keepdims=t.keepdims, axis=t.axis,
dtype=result_dtype)
elif issubclass(x.dtype.type, np.datetime64):
return (x.view('int64') != inat).sum(keepdims=t.keepdims, axis=t.axis,
dtype=result_dtype)
else:
return np.ones(x.shape, dtype=result_dtype).sum(keepdims=t.keepdims,
axis=t.axis,
dtype=result_dtype)
@dispatch(nunique, np.ndarray)
def compute_up(t, x, **kwargs):
assert t.axis == tuple(range(ndim(t._child)))
result = len(np.unique(x))
if t.keepdims:
result = np.array([result])
return result
@dispatch(Reduction, np.ndarray)
def compute_up(t, x, **kwargs):
# can't use the method here, as they aren't Python functions
reducer = getattr(np, t.symbol)
if 'dtype' in keywords(reducer):
return reducer(x, axis=t.axis, keepdims=t.keepdims,
dtype=to_numpy_dtype(t.schema))
return reducer(x, axis=t.axis, keepdims=t.keepdims)
def axify(expr, axis, keepdims=False):
""" inject axis argument into expression
Helper function for compute_up(Summary, np.ndarray)
>>> from blaze import symbol
>>> s = symbol('s', '10 * 10 * int')
>>> expr = s.sum()
>>> axify(expr, axis=0)
sum(s, axis=(0,))
"""
return type(expr)(expr._child, axis=axis, keepdims=keepdims)
@dispatch(Summary, np.ndarray)
def compute_up(expr, data, **kwargs):
shape, dtype = to_numpy(expr.dshape)
if shape:
result = np.empty(shape=shape, dtype=dtype)
for n, v in zip(expr.names, expr.values):
result[n] = compute(axify(v, expr.axis, expr.keepdims), data)
return result
else:
return tuple(compute(axify(v, expr.axis), data) for v in expr.values)
@dispatch((std, var), np.ndarray)
def compute_up(t, x, **kwargs):
return getattr(x, t.symbol)(ddof=t.unbiased, axis=t.axis,
keepdims=t.keepdims)
@compute_up.register(Distinct, np.recarray)
def recarray_distinct(t, rec, **kwargs):
return pd.DataFrame.from_records(rec).drop_duplicates(
subset=t.on or None).to_records(index=False).astype(rec.dtype)
@dispatch(Distinct, np.ndarray)
def compute_up(t, arr, _recarray_distinct=recarray_distinct, **kwargs):
if t.on:
if getattr(arr.dtype, 'names', None) is not None:
return _recarray_distinct(t, arr, **kwargs).view(np.ndarray)
else:
raise ValueError('malformed expression: no columns to distinct on')
return np.unique(arr)
@dispatch(Sort, np.ndarray)
def compute_up(t, x, **kwargs):
if x.dtype.names is None: # not a struct array
result = np.sort(x)
elif (t.key in x.dtype.names or # struct array
isinstance(t.key, list) and all(k in x.dtype.names for k in t.key)):
result = np.sort(x, order=t.key)
elif t.key:
raise NotImplementedError("Sort key %s not supported" % t.key)
if not t.ascending:
result = result[::-1]
return result
@dispatch(Head, np.ndarray)
def compute_up(t, x, **kwargs):
return x[:t.n]
@dispatch(Tail, np.ndarray)
def compute_up(t, x, **kwargs):
return x[-t.n:]
@dispatch(Label, np.ndarray)
def compute_up(t, x, **kwargs):
return np.array(x, dtype=[(t.label, x.dtype.type)])
@dispatch(ReLabel, np.ndarray)
def compute_up(t, x, **kwargs):
types = [x.dtype[i] for i in range(len(x.dtype))]
return np.array(x, dtype=list(zip(t.fields, types)))
@dispatch(Selection, np.ndarray)
def compute_up(sel, x, **kwargs):
return x[compute(sel.predicate, {sel._child: x})]
@dispatch(UTCFromTimestamp, np.ndarray)
def compute_up(expr, data, **kwargs):
return (data * 1e6).astype('datetime64[us]')
@dispatch(Slice, np.ndarray)
def compute_up(expr, x, **kwargs):
return x[expr.index]
@dispatch(Expr, np.ndarray)
def compute_up(t, x, **kwargs):
ds = t._child.dshape
if x.ndim > 1 or isinstance(x, np.recarray) or x.dtype.fields is not None:
return compute_up(t, into(DataFrame, x, dshape=ds), **kwargs)
else:
return compute_up(t, into(Series, x, dshape=ds), **kwargs)
@dispatch(nelements, np.ndarray)
def compute_up(expr, data, **kwargs):
axis = expr.axis
if expr.keepdims:
shape = tuple(data.shape[i] if i not in axis else 1
for i in range(ndim(expr._child)))
else:
shape = tuple(data.shape[i] for i in range(ndim(expr._child))
if i not in axis)
value = np.prod([data.shape[i] for i in axis])
result = np.empty(shape)
result.fill(value)
result = result.astype('int64')
return result
# Note the use of 'week': 'M8[D]' here.
# We truncate week offsets "manually" in the compute_up implementation by first
# converting to days and then multiplying our measure by 7; this simplifies our
# code by only requiring us to calculate the week offset relative to the day of
# the week.
precision_map = {'year': 'M8[Y]',
'month': 'M8[M]',
'week': 'M8[D]',
'day': 'M8[D]',
'hour': 'M8[h]',
'minute': 'M8[m]',
'second': 'M8[s]',
'millisecond': 'M8[ms]',
'microsecond': 'M8[us]',
'nanosecond': 'M8[ns]'}
# these offsets are integers in units of their representation
epoch = datetime.datetime(1970, 1, 1)
offsets = {
'week': epoch.isoweekday(),
'day': epoch.toordinal() # number of days since *Python's* epoch (01/01/01)
}
@dispatch(DateTimeTruncate, (np.ndarray, np.datetime64))
def compute_up(expr, data, **kwargs):
np_dtype = precision_map[expr.unit]
offset = offsets.get(expr.unit, 0)
measure = expr.measure * 7 if expr.unit == 'week' else expr.measure
result = (((data.astype(np_dtype)
.view('int64')
+ offset)
// measure
* measure
- offset)
.astype(np_dtype))
return result
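# Worked example of the week arithmetic above (illustrative): truncating
# 1970-01-15 (day 14 since the epoch) with unit='week' and measure=1 uses
# offset = 4 and measure = 7, so ((14 + 4) // 7) * 7 - 4 = 10, i.e. 1970-01-11;
# the weekly boundaries therefore fall on the Sundays implied by the epoch offset.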
@dispatch(np.ndarray)
def chunks(x, chunksize=1024):
start = 0
n = len(x)
while start < n:
yield x[start:start + chunksize]
start += chunksize
@dispatch(Transpose, np.ndarray)
def compute_up(expr, x, **kwargs):
return np.transpose(x, axes=expr.axes)
@dispatch(TensorDot, np.ndarray, np.ndarray)
def compute_up(expr, lhs, rhs, **kwargs):
return np.tensordot(lhs, rhs, axes=[expr._left_axes, expr._right_axes])
@dispatch(IsIn, np.ndarray)
def compute_up(expr, data, **kwargs):
return np.in1d(data, tuple(expr._keys))
@compute_up.register(Join, DataFrame, np.ndarray)
@compute_up.register(Join, np.ndarray, DataFrame)
@compute_up.register(Join, np.ndarray, np.ndarray)
def join_ndarray(expr, lhs, rhs, **kwargs):
if isinstance(lhs, np.ndarray):
lhs = DataFrame(lhs)
if isinstance(rhs, np.ndarray):
rhs = DataFrame(rhs)
return compute_up(expr, lhs, rhs, **kwargs)
@dispatch(Coerce, np.ndarray)
def compute_up(expr, data, **kwargs):
return data.astype(to_numpy_dtype(expr.schema))
@dispatch(Concat, np.ndarray, np.ndarray)
def compute_up(expr, lhs, rhs, _concat=np.concatenate, **kwargs):
return _concat((lhs, rhs), axis=expr.axis)
|
bsd-3-clause
|
leesavide/pythonista-docs
|
Documentation/matplotlib/examples/misc/rec_groupby_demo.py
|
9
|
2060
|
from __future__ import print_function
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
datafile = cbook.get_sample_data('aapl.csv', asfileobj=False)
print('loading', datafile)
r = mlab.csv2rec(datafile)
r.sort()
def daily_return(prices):
'an array of daily returns from price array'
g = np.zeros_like(prices)
g[1:] = (prices[1:]-prices[:-1])/prices[:-1]
return g
def volume_code(volume):
'code the continuous volume data categorically'
ind = np.searchsorted([1e5,1e6, 5e6,10e6, 1e7], volume)
return ind
# a list of (dtype_name, summary_function, output_dtype_name).
# rec_summarize will call each function on the indicated recarray
# attribute, and the result assigned to output name in the return
# record array.
summaryfuncs = (
('date', lambda x: [thisdate.year for thisdate in x], 'years'),
('date', lambda x: [thisdate.month for thisdate in x], 'months'),
('date', lambda x: [thisdate.weekday() for thisdate in x], 'weekday'),
('adj_close', daily_return, 'dreturn'),
('volume', volume_code, 'volcode'),
)
rsum = mlab.rec_summarize(r, summaryfuncs)
# stats is a list of (dtype_name, function, output_dtype_name).
# rec_groupby will summarize the attribute identified by the
# dtype_name over the groups in the groupby list, and assign the
# result to the output_dtype_name
stats = (
('dreturn', len, 'rcnt'),
('dreturn', np.mean, 'rmean'),
('dreturn', np.median, 'rmedian'),
('dreturn', np.std, 'rsigma'),
)
# you can summarize over a single variable, like years or months
print('summary by years')
ry = mlab.rec_groupby(rsum, ('years',), stats)
print(mlab.rec2txt(ry))
print('summary by months')
rm = mlab.rec_groupby(rsum, ('months',), stats)
print(mlab.rec2txt(rm))
# or over multiple variables like years and months
print('summary by year and month')
rym = mlab.rec_groupby(rsum, ('years','months'), stats)
print(mlab.rec2txt(rym))
print('summary by volume')
rv = mlab.rec_groupby(rsum, ('volcode',), stats)
print(mlab.rec2txt(rv))
|
apache-2.0
|
bikong2/scikit-learn
|
sklearn/linear_model/tests/test_ridge.py
|
68
|
23597
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver in ("cholesky", "sag"):
# Currently the only solvers to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
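                # (Editor's note, not in the original test: concretely,
                #  sum_i w_i * (y_i - x_i . coef)^2 equals
                #  sum_i (sqrt(w_i)*y_i - sqrt(w_i)*x_i . coef)^2, which is
                #  exactly the rescaled problem solved just below.)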
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-8).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
    # test that it works with both dense and sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
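    # (Editor's note, not in the original test: the "efficient" LOO relies on
    #  the standard leave-one-out identity for linear smoothers -- with hat
    #  matrix H = X (X^T X + alpha*I)^{-1} X^T, the LOO residual is
    #  (y_i - yhat_i) / (1 - H_ii) -- so no per-sample refit is needed.)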
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
    # now the hyperplane should rotate clockwise and
    # the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # class_weight = 'balanced' and class_weight = None should return the
    # same values when y has an equal number of each label
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
    # we give a small weight to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
    # Tests that a ValueError is raised if an unrecognized solver
    # is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
# Test that self.n_iter_ is correct.
n_targets = 2
X, y = X_diabetes, y_diabetes
y_n = np.tile(y, (n_targets, 1)).T
for max_iter in range(1, 4):
for solver in ('sag', 'lsqr'):
reg = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
reg.fit(X, y_n)
assert_array_equal(reg.n_iter_, np.tile(max_iter, n_targets))
for solver in ('sparse_cg', 'svd', 'cholesky'):
reg = Ridge(solver=solver, max_iter=1, tol=1e-1)
reg.fit(X, y_n)
assert_equal(reg.n_iter_, None)
|
bsd-3-clause
|
ChinaQuants/bokeh
|
bokeh/_legacy_charts/builder/dot_builder.py
|
43
|
6160
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Dot class, which lets you build your Dot charts by just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
try:
import pandas as pd
except ImportError:
pd = None
from ..utils import chunk, cycle_colors, make_scatter
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, FactorRange, GlyphRenderer, Range1d
from ...models.glyphs import Segment
from ...properties import Any, Bool, Either, List
def Dot(values, cat=None, stem=True, xscale="categorical", yscale="linear",
xgrid=False, ygrid=True, **kws):
""" Create a dot chart using :class:`DotBuilder <bokeh.charts.builder.dot_builder.DotBuilder>`
to render the geometry from values and cat.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
cat (list or bool, optional): list of string representing the categories.
Defaults to None.
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
from bokeh.charts import Dot, output_file, show
# dict, OrderedDict, lists, arrays and DataFrames are valid inputs
xyvalues = OrderedDict()
xyvalues['python']=[2, 5]
xyvalues['pypy']=[12, 40]
xyvalues['jython']=[22, 30]
dot = Dot(xyvalues, ['cpu1', 'cpu2'], title='dots')
output_file('dot.html')
show(dot)
"""
return create_and_build(
DotBuilder, values, cat=cat, stem=stem, xscale=xscale, yscale=yscale,
xgrid=xgrid, ygrid=ygrid, **kws
)
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class DotBuilder(Builder):
"""This is the Dot class and it is in charge of plotting Dot chart
in an easy and intuitive way.
Essentially, it provides a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (segments and circles) taking
the references from the source.
"""
cat = Either(Bool, List(Any), help="""
    List of strings representing the categories. (Defaults to None.)
""")
stem = Bool(True, help="""
    Whether to draw a stem from each dot to the axis.
""")
def _process_data(self):
"""Take the Dot data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the rect glyph inside the ``_yield_renderers`` method.
"""
if not self.cat:
self.cat = [str(x) for x in self._values.index]
self._data = dict(cat=self.cat, zero=np.zeros(len(self.cat)))
# list to save all the attributes we are going to create
# list to save all the groups available in the incoming input
# Grouping
self._groups.extend(self._values.keys())
step = np.linspace(0, 1.0, len(self._values.keys()) + 1, endpoint=False)
for i, (val, values) in enumerate(self._values.items()):
# original y value
self.set_and_get("", val, values)
# x value
cats = [c + ":" + str(step[i + 1]) for c in self.cat]
self.set_and_get("cat", val, cats)
# zeros
self.set_and_get("z_", val, np.zeros(len(values)))
# segment top y value
self.set_and_get("seg_top_", val, values)
def _set_sources(self):
"""Push the Dot data into the ColumnDataSource and calculate
the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = FactorRange(factors=self._source.data["cat"])
cat = [i for i in self._attr if not i.startswith(("cat",))]
end = 1.1 * max(max(self._data[i]) for i in cat)
self.y_range = Range1d(start=0, end=end)
def _yield_renderers(self):
"""Use the rect glyphs to display the bars.
Takes reference points from data loaded at the source and
renders circle glyphs (and segments) on the related
coordinates.
"""
self._tuples = list(chunk(self._attr, 4))
colors = cycle_colors(self._tuples, self.palette)
# quartet elements are: [data, cat, zeros, segment_top]
for i, quartet in enumerate(self._tuples):
            # draw the segment first so the scatter is placed on top of it
            # and the segment chunk doesn't show on top of the circle
if self.stem:
glyph = Segment(
x0=quartet[1], y0=quartet[2], x1=quartet[1], y1=quartet[3],
line_color="black", line_width=2
)
yield GlyphRenderer(data_source=self._source, glyph=glyph)
renderer = make_scatter(
self._source, quartet[1], quartet[0], 'circle',
colors[i - 1], line_color='black', size=15, fill_alpha=1.,
)
self._legends.append((self._groups[i], [renderer]))
yield renderer
|
bsd-3-clause
|
Aidan-Bharath/Absorption_Feature
|
absorption_feature.py
|
1
|
6223
|
# written by Aidan Bharath
# Fabry-Perot Etalon Absorption Features
import numpy as np
from matplotlib import pyplot as plt
# Variable Definitions
dg = 1 # HWHM of the incident light source
dh = 1 # HWHM of flatness defect Gaussian
df = 8 # Free spectral range of the Etalon
dt = 4 # HWHM of the top hat function
sigma = np.arange(-10,10,0.1)
sigma_o = np.arange(-10,10,0.1)
# range of wavenumbers
# np.arange(start,stop,stepsize)
k = .01 # absorption dip lorentzian HWHM
l = 0.1 # convolution lorentzian HWHM
A = 0 # Etalon absorption coefficient
R = 0.99 # Etalon reflection Coefficient
hr = 1 # decimal value of absorption
a = 50 # temporary fill in soon
###### Code Functionality
# 1 = true , 0 = false
abs_dip = 0
etalon_trans = 1
gauss_conv = 0
lorentz_conv = 0
tophat_conv = 0
####### Definitions ############
#Gaussian
def G(dg,df):
return (2*np.pi*dg)/(df*np.sqrt(np.log(2)))
def S(df,sigma,sigma_o):
return ((2*np.pi)/df)*(sigma-sigma_o)
def Gauss(dg,df,sigma,sigma_o):
return (1/(np.sqrt(np.pi)*(G(dg,df))))*np.exp(-(S(df,sigma,sigma_o)/G(dg,df))**2)
#Lorentzian
def L(l,df):
return (2*np.pi*l)/df
def T(df,sigma,sigma_o):
return ((2*np.pi)/df)*(sigma-sigma_o)
def Lorentz(l,df,sigma,sigma_o):
return (L(l,df)/(np.pi))*(1/(L(l,df)**2+T(df,sigma,sigma_o)**2))
#TopHat Function
def TH(df,sigma,sigma_o):
return ((2*np.pi)/df)*(sigma-sigma_o)
def tophat(dt,df,sigma,sigma_o):
#toha = np.zeros(len(sigma))
#toha[abs(TH(df,sigma,sigma_o))<dt] = 1
if abs(TH(df,sigma,sigma_o))<dt:
return 1
else:
return 0
#Etalon Transmission Function
def S(df,sigma,sigma_o):
return ((2*np.pi)/df)*(sigma-sigma_o)
def Etalon(R,A,df,sigma,sigma_o):
return ((1-(A/(1-R)))**2)*((1-R**2)/(2*np.pi))*(1+R**2-2*R*np.cos(S(df,sigma,sigma_o)))**(-1)
#Incident Lineshape
def incident(dg,df,sigma,sigma_o,L=None):
if abs_dip == 1:
return Gauss(dg,df,sigma,sigma_o)*np.exp(-a*Lorentz(L,df,sigma,sigma_o))
else:
return Gauss(dg,df,sigma,sigma_o)
#Gaussian Convolution
def conv_gauss(sigma,sigma_o):
total = 0
for i in xrange(len(sigma_o)):
total += Gauss(dh,df,sigma_o[i],0)*incident(dg,df,sigma,sigma_o[i],l)
return total
#Lorentzian Convolution
def conv_lorentz(sigma,sigma_o):
if gauss_conv == 1:
total = 0
for i in xrange(len(sigma_o)):
total += lineshape1[i]*Lorentz(k,df,sigma,sigma_o[i])
return total
else:
total = 0
for i in xrange(len(sigma_o)):
total += Lorentz(k,df,sigma_o[i],0)*incident(dg,df,sigma,sigma_o[i],l)
return total
#Tophat Convolution
def conv_tophat(sigma,sigma_o):
if gauss_conv == 1 and lorentz_conv == 1:
total = 0
for i in xrange(len(sigma_o)):
total += lineshape2[i]*tophat(dt,df,sigma_o[i],sigma)
return total
elif gauss_conv == 1 and lorentz_conv == 0:
total = 0
for i in xrange(len(sigma_o)):
total += lineshape1[i]*tophat(dt,df,sigma_o[i],sigma)
return total
elif gauss_conv == 0 and lorentz_conv == 1:
total = 0
for i in xrange(len(sigma_o)):
total += lineshape2[i]*tophat(dt,df,sigma_o[i],sigma)
return total
else:
total = 0
for i in xrange(len(sigma_o)):
total += tophat(dt,df,sigma_o[i],0)*incident(dg,df,sigma,sigma_o[i],l)
return total
# Etalon Transmission Function Convolution
def conv_etalon(sigma,sigma_o):
if gauss_conv == 1 and lorentz_conv == 1 and tophat_conv == 1:
total = 0
for i in xrange(len(sigma_o)):
total += lineshape3[i]*Etalon(R,A,df,sigma_o[i],sigma)
return total
elif gauss_conv == 1 and lorentz_conv == 1 and tophat_conv == 0:
total = 0
for i in xrange(len(sigma_o)):
total += lineshape2[i]*Etalon(R,A,df,sigma_o[i],sigma)
return total
elif gauss_conv == 1 and lorentz_conv == 0 and tophat_conv == 1:
total = 0
for i in xrange(len(sigma_o)):
total += lineshape3[i]*Etalon(R,A,df,sigma_o[i],sigma)
return total
elif gauss_conv == 0 and lorentz_conv == 1 and tophat_conv == 1:
total = 0
for i in xrange(len(sigma_o)):
total += lineshape3[i]*Etalon(R,A,df,sigma_o[i],sigma)
return total
elif gauss_conv == 1 and lorentz_conv == 0 and tophat_conv == 0:
total = 0
for i in xrange(len(sigma_o)):
total += lineshape1[i]*Etalon(R,A,df,sigma,sigma_o[i])
return total
elif gauss_conv == 0 and lorentz_conv == 1 and tophat_conv == 0:
total = 0
for i in xrange(len(sigma_o)):
total += lineshape2[i]*Etalon(R,A,df,sigma_o[i],sigma)
return total
elif gauss_conv ==0 and lorentz_conv == 0 and tophat_conv == 1:
total = 0
for i in xrange(len(sigma_o)):
total += lineshape3[i]*Etalon(R,A,df,sigma_o[i],sigma)
return total
else:
total = 0
for i in xrange(len(sigma_o)):
total += Etalon(R,A,df,sigma_o[i],0)*incident(dg,df,sigma,sigma_o[i],l)
return total
####### Calculations ####################
# Gaussian Linshape Convolution
if gauss_conv == 1:
lineshape1 = np.zeros(len(sigma))
for i in xrange(len(sigma)):
lineshape1[i] = conv_gauss(sigma[i],sigma_o)
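# Editor's sketch (not part of the original script): when abs_dip == 0, the
# brute-force loop in conv_gauss() is just a matrix-vector product on the
# sigma grid, so the Gaussian flatness-defect convolution can be vectorised
# with numpy broadcasting. The variable names below are illustrative only.
if gauss_conv == 1 and abs_dip == 0:
    defect_kernel = Gauss(dh, df, sigma_o, 0.0)                      # defect Gaussian sampled on the grid
    incident_grid = Gauss(dg, df, sigma[:, None], sigma_o[None, :])  # incident line for all (sigma, sigma_o) pairs
    lineshape1_fast = incident_grid.dot(defect_kernel)               # same sums as the loop above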
# Lorentzian Lineshape Convolution
if lorentz_conv == 1:
lineshape2 = np.zeros(len(sigma))
for i in xrange(len(sigma)):
lineshape2[i] = conv_lorentz(sigma[i],sigma_o)
# Top Hat Lineshape Convolution
if tophat_conv == 1:
lineshape3 = np.zeros(len(sigma))
for i in xrange(len(sigma)):
lineshape3[i] = conv_tophat(sigma[i],sigma_o)
# Convolution with Ideal Etalon
if etalon_trans == 1:
lineshape4 = np.zeros(len(sigma))
for i in xrange(len(sigma)):
lineshape4[i] = conv_etalon(sigma[i],sigma_o)
### Plotting functions
hat = np.zeros(len(sigma))
for i in xrange(len(sigma)):
hat[i] = Etalon(R,A,df,sigma[i],sigma_o[10])
plt.plot(sigma,lineshape4)
plt.show()
|
mit
|
kylerbrown/scikit-learn
|
examples/cluster/plot_adjusted_for_chance_measures.py
|
286
|
4353
|
"""
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters gets
closer to the total number of samples used to compute the measure.
Adjusted-for-chance measures such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
    Both random labelings have the same number of clusters for each possible
    value in ``n_clusters_range``.
    When fixed_n_classes is not None the first labeling is considered a ground
    truth class assignment with a fixed number of classes.
"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes - 1,
size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
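# Editor's sketch (not part of the original example): a tiny standalone check
# of the adjustment-for-chance effect described in the module docstring --
# for two independent random labelings the adjusted Rand index stays near 0,
# while the non-adjusted V-measure is noticeably above 0.
_demo_rng = np.random.RandomState(0)
_demo_a = _demo_rng.randint(0, 10, size=300)
_demo_b = _demo_rng.randint(0, 10, size=300)
print("ARI on random labels: %0.3f"
      % metrics.adjusted_rand_score(_demo_a, _demo_b))
print("V-measure on random labels: %0.3f"
      % metrics.v_measure_score(_demo_a, _demo_b))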
score_funcs = [
metrics.adjusted_rand_score,
metrics.v_measure_score,
metrics.adjusted_mutual_info_score,
metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
"with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(np.int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
print("Computing %s for %d values of n_clusters and n_samples=%d"
% (score_func.__name__, len(n_clusters_range), n_samples))
t0 = time()
scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=n_classes)
print("done in %0.3fs" % (time() - t0))
plots.append(plt.errorbar(
n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
"against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
|
bsd-3-clause
|
TimBizeps/BachelorAP
|
V303_LockInVerstärker/auswertung.py
|
1
|
1160
|
import matplotlib as mpl
mpl.use('pgf')
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from uncertainties import ufloat
import uncertainties.unumpy as unp
mpl.rcParams.update({
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
'pgf.texsystem': 'lualatex',
'pgf.preamble': r'\usepackage{unicode-math}\usepackage{siunitx}'
})
x, y, z = np.genfromtxt('messwerte.txt', unpack=True)
x=x*np.pi
def f(x, a, b):
return a*np.cos(x+b)
params, covariance = curve_fit(f, x, y)
errors = np.sqrt(np.diag(covariance))
print('a =', params[0], '±', errors[0])
print('b =', params[1]+2*np.pi, '±', errors[1])
x_plot=np.linspace(0, 6.9)
plt.plot(x, y, 'rx', label="Messwerte")
plt.plot(x_plot, f(x_plot, *params), 'b-', label='Fit-Funktion', linewidth=1)
plt.legend(loc="best")
plt.xticks([0, np.pi/2, np.pi, 3*np.pi/2, 2*np.pi],
[r"$0$", r"$\frac{1}{2} \pi$", r"$\pi$", r"$\frac{3}{2} \pi$", r"$2 \pi$"])
plt.xlabel(r'Phase $\phi$ im Bogenmaß')
plt.ylabel(r'Ausgangsspannung \,/\, $\si{\volt}$')
plt.title('Inaktiver Noise Generator')
plt.tight_layout()
plt.savefig("Plot.pdf")
|
gpl-3.0
|
hfp/tensorflow-xsmm
|
tensorflow/contrib/learn/python/learn/estimators/__init__.py
|
39
|
12688
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity (deprecated).
These classes are deprecated and replaced with `tf.estimator`.
See [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `SVM`: Binary classifier using linear SVMs.
* `LogisticRegressor`: Use when you need custom model for binary
classification.
* `Estimator`: Use when you need custom model for N class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used, so the section below
shows which feature columns are fed to each estimator.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train():
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### DynamicRnnEstimator
An `Estimator` that uses a recurrent neural network with dynamic unrolling.
```python
problem_type = ProblemType.CLASSIFICATION # or REGRESSION
prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE
estimator = DynamicRnnEstimator(problem_type,
prediction_type,
my_feature_columns)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train(): # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval(): # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
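For example (a minimal sketch only; the elided bodies follow the same
convention as the skeleton above):
```python
def model_fn(features, targets, mode, params):
  # build predictions and loss as usual
  ...
  if mode == tf.contrib.learn.ModeKeys.TRAIN:
    # only construct the training operation when fitting
    ...
  return predictions, loss, train_op
```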
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There are additional estimators under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import GraphRewriteSpec
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.head import binary_svm_head
from tensorflow.contrib.learn.python.learn.estimators.head import Head
from tensorflow.contrib.learn.python.learn.estimators.head import loss_only_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_class_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_label_head
from tensorflow.contrib.learn.python.learn.estimators.head import no_op_train_fn
from tensorflow.contrib.learn.python.learn.estimators.head import poisson_regression_head
from tensorflow.contrib.learn.python.learn.estimators.head import regression_head
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearEstimator
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.rnn_common import PredictionType
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
|
apache-2.0
|
mrcslws/htmresearch
|
projects/union_pooling/experiments/tp_learning/tp_trained_tm.py
|
8
|
14905
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import sys
import time
import os
import yaml
from optparse import OptionParser
import numpy
from pylab import rcParams
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
from matplotlib.backends.backend_pdf import PdfPages
plt.ion()
from nupic.data.generators.pattern_machine import PatternMachine
from nupic.data.generators.sequence_machine import SequenceMachine
from nupic.algorithms.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from htmresearch.frameworks.union_temporal_pooling.union_temporal_pooler_experiment import (
UnionTemporalPoolerExperiment)
_SHOW_PROGRESS_INTERVAL = 200
"""
Experiment 1
Runs UnionTemporalPooler on input from a Temporal Memory after training
on a long sequence
Enables learning in UnionTemporalPooler, and monitors growth of synapses
"""
# def experiment1():
paramDir = 'params/5_trainingPasses_1024_columns.yaml'
outputDir = 'results/'
params = yaml.safe_load(open(paramDir, 'r'))
options = {'plotVerbosity': 2, 'consoleVerbosity': 2}
plotVerbosity = 2
consoleVerbosity = 1
print "Running SDR overlap experiment...\n"
print "Params dir: {0}".format(paramDir)
print "Output dir: {0}\n".format(outputDir)
# Dimensionality of sequence patterns
patternDimensionality = params["patternDimensionality"]
# Cardinality (ON / true bits) of sequence patterns
patternCardinality = params["patternCardinality"]
# Length of sequences shown to network
sequenceLength = params["sequenceLength"]
# Number of sequences used. Sequences may share common elements.
numberOfSequences = params["numberOfSequences"]
# Number of sequence passes for training the TM. Zero => no training.
trainingPasses = params["trainingPasses"]
# Generate a sequence list and an associated labeled list (both containing a
# set of sequences separated by None)
print "\nGenerating sequences..."
patternAlphabetSize = sequenceLength * numberOfSequences
patternMachine = PatternMachine(patternDimensionality, patternCardinality,
patternAlphabetSize)
sequenceMachine = SequenceMachine(patternMachine)
numbers = sequenceMachine.generateNumbers(numberOfSequences, sequenceLength)
generatedSequences = sequenceMachine.generateFromNumbers(numbers)
sequenceLabels = [str(numbers[i + i*sequenceLength: i + (i+1)*sequenceLength])
for i in xrange(numberOfSequences)]
labeledSequences = []
for label in sequenceLabels:
for _ in xrange(sequenceLength):
labeledSequences.append(label)
labeledSequences.append(None)
def initializeNetwork():
tmParamOverrides = params["temporalMemoryParams"]
upParamOverrides = params["unionPoolerParams"]
# Set up the Temporal Memory and Union Pooler network
print "\nCreating network..."
experiment = UnionTemporalPoolerExperiment(tmParamOverrides, upParamOverrides)
return experiment
def runTMtrainingPhase(experiment):
# Train only the Temporal Memory on the generated sequences
if trainingPasses > 0:
print "\nTraining Temporal Memory..."
if consoleVerbosity > 0:
print "\nPass\tBursting Columns Mean\tStdDev\tMax"
for i in xrange(trainingPasses):
experiment.runNetworkOnSequences(generatedSequences,
labeledSequences,
tmLearn=True,
upLearn=None,
verbosity=consoleVerbosity,
progressInterval=_SHOW_PROGRESS_INTERVAL)
if consoleVerbosity > 0:
stats = experiment.getBurstingColumnsStats()
print "{0}\t{1}\t{2}\t{3}".format(i, stats[0], stats[1], stats[2])
# Reset the TM monitor mixin's records accrued during this training pass
# experiment.tm.mmClearHistory()
print
print MonitorMixinBase.mmPrettyPrintMetrics(
experiment.tm.mmGetDefaultMetrics())
print
def runTestPhase(experiment, tmLearn=False, upLearn=True, outputfileName='results/TemporalPoolingOutputs.pdf'):
print "\nRunning test phase..."
print "tmLearn: ", tmLearn
print "upLearn: ", upLearn
inputSequences = generatedSequences
inputCategories = labeledSequences
experiment.tm.mmClearHistory()
experiment.up.mmClearHistory()
experiment.tm.reset()
experiment.up.reset()
# Persistence levels across time
poolingActivationTrace = numpy.zeros((experiment.up._numColumns, 0))
# union SDR across time
activeCellsTrace = numpy.zeros((experiment.up._numColumns, 0))
# active cells in SP across time
activeSPTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of connections for SP cells
connectionCountTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of active inputs per SP cells
activeOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))
# number of predicted active inputs per SP cells
predictedActiveOverlapsTrace = numpy.zeros((experiment.up._numColumns, 0))
for _ in xrange(trainingPasses):
experiment.tm.reset()
experiment.up.reset()
for i in xrange(len(inputSequences)):
sensorPattern = inputSequences[i]
inputCategory = inputCategories[i]
if sensorPattern is None:
pass
else:
experiment.tm.compute(sensorPattern,
learn=tmLearn,
sequenceLabel=inputCategory)
activeCells, predActiveCells, burstingCols, = experiment.getUnionTemporalPoolerInput()
overlapsActive = experiment.up._calculateOverlap(activeCells)
overlapsPredictedActive = experiment.up._calculateOverlap(predActiveCells)
activeOverlapsTrace = numpy.concatenate((activeOverlapsTrace, overlapsActive.reshape((experiment.up._numColumns,1))), 1)
predictedActiveOverlapsTrace = numpy.concatenate((predictedActiveOverlapsTrace, overlapsPredictedActive.reshape((experiment.up._numColumns,1))), 1)
experiment.up.compute(activeCells,
predActiveCells,
learn=upLearn,
sequenceLabel=inputCategory)
currentPoolingActivation = experiment.up._poolingActivation.reshape((experiment.up._numColumns, 1))
poolingActivationTrace = numpy.concatenate((poolingActivationTrace, currentPoolingActivation), 1)
currentUnionSDR = numpy.zeros((experiment.up._numColumns, 1))
currentUnionSDR[experiment.up._unionSDR] = 1
activeCellsTrace = numpy.concatenate((activeCellsTrace, currentUnionSDR), 1)
currentSPSDR = numpy.zeros((experiment.up._numColumns, 1))
currentSPSDR[experiment.up._activeCells] = 1
activeSPTrace = numpy.concatenate((activeSPTrace, currentSPSDR), 1)
connectionCountTrace = numpy.concatenate((connectionCountTrace,
experiment.up._connectedCounts.reshape((experiment.up._numColumns, 1))), 1)
print "\nPass\tBursting Columns Mean\tStdDev\tMax"
stats = experiment.getBurstingColumnsStats()
print "{0}\t{1}\t{2}\t{3}".format(0, stats[0], stats[1], stats[2])
print
print MonitorMixinBase.mmPrettyPrintMetrics(\
experiment.tm.mmGetDefaultMetrics() + experiment.up.mmGetDefaultMetrics())
print
experiment.tm.mmClearHistory()
newConnectionCountTrace = numpy.zeros(connectionCountTrace.shape)
n = newConnectionCountTrace.shape[1]
newConnectionCountTrace[:,0:n-2] = connectionCountTrace[:,1:n-1] - connectionCountTrace[:,0:n-2]
# estimate fraction of shared bits across adjacent time point
unionSDRshared = experiment.up._mmComputeUnionSDRdiff()
bitLifeList = experiment.up._mmComputeBitLifeStats()
bitLife = numpy.array(bitLifeList)
# Plot SP outputs, UP persistence and UP outputs in testing phase
def showSequenceStartLine(ax, trainingPasses, sequenceLength):
for i in xrange(trainingPasses):
    ax.vlines(i*sequenceLength, 0, ax.get_ylim()[0], linestyles='--')
ncolShow = 50
f, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=1,ncols=4)
ax1.imshow(activeSPTrace[1:ncolShow,:], cmap=cm.Greys,interpolation="nearest",aspect='auto')
showSequenceStartLine(ax1, trainingPasses, sequenceLength)
ax1.set_title('SP SDR')
ax1.set_ylabel('Columns')
ax2.imshow(poolingActivationTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax2, trainingPasses, sequenceLength)
ax2.set_title('Persistence')
ax3.imshow(activeCellsTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax3, trainingPasses, sequenceLength)
ax3.set_title('Union SDR')
ax4.imshow(newConnectionCountTrace[1:ncolShow,:], cmap=cm.Greys, interpolation="nearest",aspect='auto')
showSequenceStartLine(ax4, trainingPasses, sequenceLength)
ax4.set_title('New Connection #')
ax2.set_xlabel('Time (steps)')
pp = PdfPages(outputfileName)
pp.savefig()
pp.close()
def SDRsimilarity(SDR1, SDR2):
  return len(SDR1 & SDR2) / float(len(SDR1 | SDR2))
def getUnionSDRSimilarityCurve(activeCellsTrace, trainingPasses, sequenceLength, maxSeparation, skipBeginningElements=0):
similarityVsSeparation = numpy.zeros((trainingPasses, maxSeparation))
for rpts in xrange(trainingPasses):
for sep in xrange(maxSeparation):
similarity = []
for i in xrange(rpts*sequenceLength+skipBeginningElements, rpts*sequenceLength+sequenceLength-sep):
similarity.append(SDRsimilarity(activeCellsTrace[i], activeCellsTrace[i+sep]))
similarityVsSeparation[rpts, sep] = numpy.mean(similarity)
return similarityVsSeparation
def plotSDRsimilarityVsTemporalSeparation(similarityVsSeparationBefore, similarityVsSeparationAfter):
# plot SDR similarity as a function of temporal separation
f, (ax1,ax2) = plt.subplots(nrows=1,ncols=2)
rpt = 0
ax1.plot(similarityVsSeparationBefore[rpt,:],label='Before')
ax1.plot(similarityVsSeparationAfter[rpt,:],label='After')
ax1.set_xlabel('Separation in time between SDRs')
ax1.set_ylabel('SDRs overlap')
ax1.set_title('Initial Cycle')
ax1.set_ylim([0,1])
ax1.legend(loc='upper right')
rpt=4
ax2.plot(similarityVsSeparationBefore[rpt,:],label='Before')
ax2.plot(similarityVsSeparationAfter[rpt,:],label='After')
ax2.set_xlabel('Separation in time between SDRs')
ax2.set_ylabel('SDRs overlap')
ax2.set_title('Last Cycle')
ax2.set_ylim([0,1])
ax2.legend(loc='upper right')
f.savefig('results/UnionSDRoverlapVsTemporalSeparation.eps',format='eps')
def plotSimilarityMatrix(similarityMatrixBefore, similarityMatrixAfter):
f, (ax1, ax2) = plt.subplots(nrows=1,ncols=2)
im = ax1.imshow(similarityMatrixBefore[0:sequenceLength, 0:sequenceLength],interpolation="nearest")
ax1.set_xlabel('Time (steps)')
ax1.set_ylabel('Time (steps)')
ax1.set_title('Overlap - Before Learning')
im = ax2.imshow(similarityMatrixAfter[0:sequenceLength, 0:sequenceLength],interpolation="nearest")
ax2.set_xlabel('Time (steps)')
ax2.set_ylabel('Time (steps)')
ax2.set_title('Overlap - After Learning')
cax,kw = mpl.colorbar.make_axes([ax1, ax2])
plt.colorbar(im, cax=cax, **kw)
f.savefig('results/UnionSDRoverlapBeforeVsAfterLearning.eps',format='eps')
def calculateSimilarityMatrix(activeCellsTraceBefore, activeCellsTraceAfter):
nSteps = sequenceLength # len(activeCellsTraceBefore)
similarityMatrixBeforeAfter = numpy.zeros((nSteps, nSteps))
similarityMatrixBefore = numpy.zeros((nSteps, nSteps))
similarityMatrixAfter = numpy.zeros((nSteps, nSteps))
for i in xrange(nSteps):
for j in xrange(nSteps):
similarityMatrixBefore[i,j] = SDRsimilarity(activeCellsTraceBefore[i], activeCellsTraceBefore[j])
similarityMatrixAfter[i,j] = SDRsimilarity(activeCellsTraceAfter[i], activeCellsTraceAfter[j])
similarityMatrixBeforeAfter[i,j] = SDRsimilarity(activeCellsTraceBefore[i], activeCellsTraceAfter[j])
return (similarityMatrixBefore, similarityMatrixAfter, similarityMatrixBeforeAfter)
def plotTPRvsUPROverlap(similarityMatrix):
f = plt.figure()
im = plt.imshow(similarityMatrix[0:sequenceLength, 0:sequenceLength],
interpolation="nearest",aspect='auto', vmin=0, vmax=0.6)
plt.colorbar(im)
plt.xlabel('UPR over time')
plt.ylabel('TPR over time')
plt.title(' Overlap between UPR & TPR')
f.savefig('results/OverlapTPRvsUPR.eps',format='eps')
if __name__ == "__main__":
experiment = initializeNetwork()
runTMtrainingPhase(experiment)
runTestPhase(experiment, tmLearn=False, upLearn=False, outputfileName='results/TemporalPoolingBeforeLearning.pdf')
upBeforeLearning = copy.deepcopy(experiment.up)
runTestPhase(experiment, tmLearn=False, upLearn=True, outputfileName='results/TemporalPoolingDuringLearning.pdf')
upDuringLearning = copy.deepcopy(experiment.up)
runTestPhase(experiment, tmLearn=False, upLearn=False, outputfileName='results/TemporalPoolingAfterLearning.pdf')
upAfterLearning = copy.deepcopy(experiment.up)
maxSeparation = 30
skipBeginningElements = 10
activeCellsTraceBefore = upBeforeLearning._mmTraces['activeCells'].data
similarityVsSeparationBefore = getUnionSDRSimilarityCurve(activeCellsTraceBefore, trainingPasses, sequenceLength,
maxSeparation, skipBeginningElements)
activeCellsTraceAfter = upAfterLearning._mmTraces['activeCells'].data
similarityVsSeparationAfter = getUnionSDRSimilarityCurve(activeCellsTraceAfter, trainingPasses, sequenceLength,
maxSeparation, skipBeginningElements)
plotSDRsimilarityVsTemporalSeparation(similarityVsSeparationBefore, similarityVsSeparationAfter)
(similarityMatrixBefore, similarityMatrixAfter, similarityMatrixBeforeAfter) = \
calculateSimilarityMatrix(activeCellsTraceBefore, activeCellsTraceAfter)
plotTPRvsUPROverlap(similarityMatrixBeforeAfter)
plotSimilarityMatrix(similarityMatrixBefore, similarityMatrixAfter)
|
agpl-3.0
|
kingjr/meg_expectation_p3
|
toolbox/jr_toolbox/utils.py
|
1
|
24782
|
import pickle
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import warnings
from mne.stats import spatio_temporal_cluster_1samp_test
# STATS #######################################################################
def stat_fun(x, sigma=0, method='relative'):
from mne.stats import ttest_1samp_no_p
t_values = ttest_1samp_no_p(x, sigma=sigma, method=method)
t_values[np.isnan(t_values)] = 0
return t_values
def stats(X):
X = np.array(X)
X = X[:, :, None] if X.ndim == 2 else X
T_obs_, clusters, p_values, _ = spatio_temporal_cluster_1samp_test(
X, out_type='mask', stat_fun=stat_fun, n_permutations=2**12,
n_jobs=-1)
p_values_ = np.ones_like(X[0]).T
for cluster, pval in zip(clusters, p_values):
p_values_[cluster.T] = pval
return np.squeeze(p_values_)
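# Illustrative sketch, not part of the original module: how stats() might be
# called. The random data and the (observations, times, sensors) shape below are
# assumptions for demonstration; only the call itself comes from the code above.
def _example_stats():
    """Hypothetical usage of stats() on random observations x time x space data."""
    import numpy as np
    X = np.random.randn(10, 50, 5)  # e.g. 10 subjects, 50 time samples, 5 sensors
    p_values = stats(X)  # cluster-corrected p-value for every tested point
    return p_values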
def nested_analysis(X, df, condition, function=None, query=None,
single_trial=False, y=None, n_jobs=-1):
""" Apply a nested set of analyses.
Parameters
----------
X : np.array, shape(n_samples, ...)
Data array.
df : pandas.DataFrame
Condition DataFrame
condition : str | list
If string, get the samples for each unique value of df[condition]
        If list, nested_analysis is called recursively on each element.
query : str | None, optional
To select a subset of trial using pandas.DataFrame.query()
function : function
Computes across list of evoked. Must be of the form:
function(X[:], y[:])
y : np.array, shape(n_conditions)
n_jobs : int
Number of core to compute the function. Defaults to -1.
Returns
-------
scores : np.array, shape(...)
The results of the function
sub : dict()
Contains results of sub levels.
"""
import numpy as np
print condition
if isinstance(condition, str):
# Subselect data using pandas.DataFrame queries
sel = range(len(X)) if query is None else df.query(query).index
X = X.take(sel, axis=0)
y = np.array(df[condition][sel])
# Find unique conditions
values = list()
for ii in np.unique(y):
if (ii is not None) and (ii not in [np.nan]):
values.append(ii)
# Subsubselect for each unique condition
y_sel = [np.where(y == value)[0] for value in values]
# Mean condition:
X_mean = np.zeros(np.hstack((len(y_sel), X.shape[1:])))
y_mean = np.zeros(len(y_sel))
for ii, sel_ in enumerate(y_sel):
X_mean[ii, ...] = np.mean(X[sel_, ...], axis=0)
if isinstance(y[sel_[0]], str):
y_mean[ii] = ii
else:
y_mean[ii] = y[sel_[0]]
if single_trial:
            X = X.take(np.hstack(y_sel), axis=0)  # ERROR COMES FROM HERE
y = y.take(np.hstack(y_sel), axis=0)
else:
X = X_mean
y = y_mean
# Store values to keep track
sub_list = dict(X=X_mean, y=y_mean, sel=sel, query=query,
condition=condition, values=values,
single_trial=single_trial)
elif isinstance(condition, list):
print 'list'
# If condition is a list, we must recall the function to gather
# the results of the lower levels
sub_list = list()
X_list = list() # FIXME use numpy array
for subcondition in condition:
scores, sub = nested_analysis(
X, df, subcondition['condition'], n_jobs=n_jobs,
function=subcondition.get('function', None),
query=subcondition.get('query', None))
X_list.append(scores)
sub_list.append(sub)
X = np.array(X_list)
if y is None:
y = np.arange(len(condition))
        if len(y) != len(X):
            raise ValueError('X and y must be of identical shape: '
                             '%s <> %s' % (len(X), len(y)))
sub_list = dict(X=X, y=y, sub=sub_list, condition=condition)
# Default function
function = _default_analysis if function is None else function
scores = pairwise(X, y, function, n_jobs=n_jobs)
return scores, sub_list
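# Illustrative sketch, not part of the original module: a hedged guess at how
# nested_analysis() could be driven. The column names ('soa', 'seen') and the
# DataFrame below are invented for demonstration; only the 'condition' and
# 'query' keys are taken from the code above.
def _example_nested_analysis():
    """Hypothetical two-level call: contrast 'seen' within each SOA subset."""
    import numpy as np
    import pandas as pd
    n_trials = 40
    X = np.random.randn(n_trials, 20, 3)  # trials x times x channels (made up)
    df = pd.DataFrame(dict(soa=np.repeat([17, 83], 20),
                           seen=np.tile([0, 1], 20)))
    conditions = [dict(condition='seen', query='soa == 17'),
                  dict(condition='seen', query='soa == 83')]
    scores, sub = nested_analysis(X, df, conditions, n_jobs=1)
    return scores, sub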
def _default_analysis(X, y):
# Binary contrast
unique_y = np.unique(y)
if len(unique_y) == 2:
y = np.where(y == unique_y[0], 1, -1)
# Tile Y to across X dimension without allocating memory
Y = tile_memory_free(y, X.shape[1:])
return np.mean(X * Y, axis=0)
# Linear regression:
elif len(unique_y) > 2:
return repeated_spearman(X, y)
else:
raise RuntimeError('Please specify a function for this kind of data')
def tile_memory_free(y, shape):
"""
Tile vector along multiple dimension without allocating new memory.
Parameters
----------
y : np.array, shape (n,)
data
    shape : tuple | np.array, shape (m,)
        Extra dimensions along which y is virtually tiled.
Returns
-------
Y : np.array, shape (n, *shape)
"""
y = np.lib.stride_tricks.as_strided(y,
(np.prod(shape), y.size),
(0, y.itemsize)).T
return y.reshape(np.hstack((len(y), shape)))
def test_tile_memory_free():
from nose.tools import assert_equal
y = np.arange(100)
Y = tile_memory_free(y, 33)
assert_equal(y.shape[0], Y.shape[0])
    np.testing.assert_array_equal(y, Y[:, 0])
    np.testing.assert_array_equal(y, Y[:, -1])
def pairwise(X, y, func, n_jobs=-1):
"""Applies pairwise operations on two matrices using multicore:
function(X[:, jj, kk, ...], y[:, jj, kk, ...])
Parameters
----------
X : np.ndarray, shape(n, ...)
y : np.array, shape(n, ...) | shape(n,)
If shape == X.shape:
parallel(X[:, chunk], y[:, chunk ] for chunk in n_chunks)
If shape == X.shape[0]:
parallel(X[:, chunk], y for chunk in n_chunks)
func : function
n_jobs : int, optional
Number of parallel cpu.
Returns
-------
out : np.array, shape(func(X, y))
"""
import numpy as np
from mne.parallel import parallel_func
dims = X.shape
if y.shape[0] != dims[0]:
raise ValueError('X and y must have identical shapes')
X.resize([dims[0], np.prod(dims[1:])])
if y.ndim > 1:
Y = np.reshape(y, [dims[0], np.prod(dims[1:])])
parallel, pfunc, n_jobs = parallel_func(func, n_jobs)
n_cols = X.shape[1]
n_chunks = min(n_cols, n_jobs)
chunks = np.array_split(range(n_cols), n_chunks)
if y.ndim == 1:
out = parallel(pfunc(X[:, chunk], y) for chunk in chunks)
else:
out = parallel(pfunc(X[:, chunk], Y[:, chunk]) for chunk in chunks)
# size back in case higher dependencies
X.resize(dims)
# unpack
if isinstance(out[0], tuple):
return [np.reshape(out_, dims[1:]) for out_ in zip(*out)]
else:
return np.reshape(out, dims[1:])
def _dummy_function_1(x, y):
return x[0, :]
def _dummy_function_2(x, y):
return x[0, :], 0. * x[0, :]
def test_pairwise():
from nose.tools import assert_equal, assert_raises
n_obs = 20
n_dims1 = 5
n_dims2 = 10
y = np.linspace(0, 1, n_obs)
X = np.zeros((n_obs, n_dims1, n_dims2))
for dim1 in range(n_dims1):
for dim2 in range(n_dims2):
X[:, dim1, dim2] = dim1 + 10*dim2
# test size
score = pairwise(X, y, _dummy_function_1, n_jobs=2)
assert_equal(score.shape, X.shape[1:])
np.testing.assert_array_equal(score[:, 0], np.arange(n_dims1))
np.testing.assert_array_equal(score[0, :], 10 * np.arange(n_dims2))
    # Test that X has not changed because of resize
np.testing.assert_array_equal(X.shape, [n_obs, n_dims1, n_dims2])
# test multiple out
score1, score2 = pairwise(X, y, _dummy_function_2, n_jobs=2)
np.testing.assert_array_equal(score1[:, 0], np.arange(n_dims1))
np.testing.assert_array_equal(score2[:, 0], 0 * np.arange(n_dims1))
# Test array vs vector
score1, score2 = pairwise(X, X, _dummy_function_2, n_jobs=1)
# test error check
assert_raises(ValueError, pairwise, X, y[1:], _dummy_function_1)
assert_raises(ValueError, pairwise, y, X, _dummy_function_1)
def share_clim(axes, clim=None):
"""Share clim across multiple axes
Parameters
----------
axes : plt.axes
clim : np.array | list, shape(2,), optional
Defaults is min and max across axes.clim.
"""
# Find min max of clims
if clim is None:
clim = list()
for ax in axes:
for im in ax.get_images():
clim += np.array(im.get_clim()).flatten().tolist()
clim = [np.min(clim), np.max(clim)]
# apply common clim
for ax in axes:
for im in ax.get_images():
im.set_clim(clim)
plt.draw()
def meg_to_gradmag(chan_types):
"""force separation of magnetometers and gradiometers"""
from mne.channels import read_ch_connectivity
if 'meg' in [chan['name'] for chan in chan_types]:
mag_connectivity, _ = read_ch_connectivity('neuromag306mag')
# FIXME grad connectivity? Need virtual sensor?
# grad_connectivity, _ = read_ch_connectivity('neuromag306grad')
chan_types = [dict(name='mag', connectivity=mag_connectivity),
dict(name='grad', connectivity='missing')] + \
[chan for chan in chan_types if chan['name'] != 'meg']
return chan_types
def scorer_auc(y_true, y_pred):
    """Dedicated to 2-class probabilistic outputs."""
    from sklearn.metrics import roc_auc_score
    from sklearn.preprocessing import LabelBinarizer
le = LabelBinarizer()
y_true = le.fit_transform(y_true)
return roc_auc_score(y_true, y_pred)
def scorer_spearman(y_true, y_pred):
""""Dedicated to standard SVR"""
from scipy.stats import spearmanr
rho, p = spearmanr(y_true, y_pred[:, 0])
return rho
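# Illustrative sketch, not part of the original module: the two scorers above
# assume different prediction shapes -- scorer_auc takes a 1d probability vector,
# scorer_spearman a (n_samples, 1) regression output. The toy data is made up.
def _example_scorers():
    """Hypothetical calls showing the expected y_pred shapes."""
    import numpy as np
    auc = scorer_auc(np.array([0, 0, 1, 1]),
                     np.array([0.1, 0.4, 0.35, 0.8]))
    rho = scorer_spearman(np.array([1., 2., 3., 4.]),
                          np.array([[1.1], [1.9], [3.2], [3.8]]))
    return auc, rho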
def repeated_spearman(X, y, dtype=None):
"""Computes spearman correlations between a vector and a matrix.
Parameters
----------
X : np.array, shape (n_samples, n_measures)
Data matrix onto which the vector is correlated.
y : np.array, shape (n_samples)
Data vector.
dtype : type, optional
Data type used to compute correlation values to optimize memory.
Returns
-------
rho : np.array, shape (n_measures)
"""
if X.ndim not in [1, 2] or y.ndim != 1 or X.shape[0] != y.shape[0]:
        raise ValueError('y must be a vector, and X a matrix with an equal '
                         'number of rows.')
if X.ndim == 1:
X = X[:, None]
# Rank
X = np.argsort(X, axis=0)
y = np.argsort(y, axis=0)
# Double rank to ensure that normalization step of compute_corr
# (X -= mean(X)) remains an integer.
if (dtype is None and X.shape[0] < 2 ** 8) or\
(dtype in [int, np.int16, np.int32, np.int64]):
X *= 2
y *= 2
dtype = np.int16
else:
dtype = type(y[0])
X = np.array(X, dtype=dtype)
y = np.array(y, dtype=dtype)
return repeated_corr(X, y, dtype=type(y[0]))
def repeated_corr(X, y, dtype=float):
"""Computes pearson correlations between a vector and a matrix.
Adapted from Jona-Sassenhagen's PR #L1772 on mne-python.
Parameters
----------
y : np.array, shape (n_samples)
Data vector.
X : np.array, shape (n_samples, n_measures)
Data matrix onto which the vector is correlated.
dtype : type, optional
Data type used to compute correlation values to optimize memory.
Returns
-------
rho : np.array, shape (n_measures)
"""
from sklearn.utils.extmath import fast_dot
if X.ndim not in [1, 2] or y.ndim != 1 or X.shape[0] != y.shape[0]:
        raise ValueError('y must be a vector, and X a matrix with an equal '
                         'number of rows.')
if X.ndim == 1:
X = X[:, None]
y -= np.array(y.mean(0), dtype=dtype)
X -= np.array(X.mean(0), dtype=dtype)
y_sd = y.std(0, ddof=1)
X_sd = X.std(0, ddof=1)[:, None if y.shape == X.shape else Ellipsis]
return (fast_dot(y.T, X) / float(len(y) - 1)) / (y_sd * X_sd)
def test_corr_functions():
from scipy.stats import spearmanr
test_corr(np.corrcoef, repeated_corr, 1)
test_corr(spearmanr, repeated_spearman, 0)
def test_corr(old_func, new_func, sel_item):
from nose.tools import assert_equal, assert_raises
n_obs = 20
n_dims = 10
y = np.linspace(0, 1, n_obs)
X = np.tile(y, [n_dims, 1]).T + np.random.randn(n_obs, n_dims)
rho_fast = new_func(X, y)
# test dimensionality
assert_equal(rho_fast.ndim, 1)
assert_equal(rho_fast.shape[0], n_dims)
# test data
rho_slow = np.ones(n_dims)
for dim in range(n_dims):
rho_slow[dim] = np.array(old_func(X[:, dim], y)).item(sel_item)
np.testing.assert_array_equal(rho_fast.shape, rho_slow.shape)
np.testing.assert_array_almost_equal(rho_fast, rho_slow)
# test errors
new_func(np.squeeze(X[:, 0]), y)
assert_raises(ValueError, new_func, y, X)
assert_raises(ValueError, new_func, X, y[1:])
# test dtype
X = np.argsort(X, axis=0) * 2 # ensure no bug at normalization
y = np.argsort(y, axis=0) * 2
rho_fast = new_func(X, y, dtype=int)
rho_slow = np.ones(n_dims)
for dim in range(n_dims):
rho_slow[dim] = np.array(old_func(X[:, dim], y)).item(sel_item)
np.testing.assert_array_almost_equal(rho_fast, rho_slow)
def save_to_dict(fname, data, overwrite=False):
"""Add pickle object to file without replacing its content using a
    dictionary format whose keys correspond to the names of the variables.
Parameters
----------
fname : str
file name
data : dict
overwrite : bool
Default: False
"""
# Identify whether the file exists
if op.isfile(fname) and not overwrite:
data_dict = load_from_dict(fname)
else:
data_dict = dict()
for key in data.keys():
data_dict[key] = data[key]
# Save
with open(fname, 'w') as f:
pickle.dump(data_dict, f)
def load_from_dict(fname, varnames=None, out_type='dict'):
"""Load pickle object from file using a dictionary format which keys'
correspond to the names of the variables.
Parameters
----------
fname : str
file name
varnames : None | str | list (optional)
Variables to load. By default, load all of them.
out_type : str
'list', 'dict': default: dict
Returns
-------
vars : dict
        dictionary of loaded variables whose keys correspond to varnames
"""
# Identify whether the file exists
if not op.isfile(fname):
raise RuntimeError('%s not found' % fname)
# Load original data
with open(fname) as f:
data_dict = pickle.load(f)
# Specify variables to load
if not varnames:
varnames = data_dict.keys()
    elif isinstance(varnames, str):
varnames = [varnames]
# Append result in a list
if out_type == 'dict':
out = dict()
for key in varnames:
out[key] = data_dict[key]
elif out_type == 'list':
out = list()
for key in varnames:
out.append(data_dict[key])
return out
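# Illustrative sketch, not part of the original module: a hedged round trip
# through save_to_dict / load_from_dict. The file name and variables are
# arbitrary examples.
def _example_save_load():
    """Hypothetical usage: append variables to a pickle file, then reload some."""
    save_to_dict('results.pickle', dict(scores=[0.5, 0.7], subject='s01'),
                 overwrite=True)
    save_to_dict('results.pickle', dict(times=[0.1, 0.2]))  # keeps 'scores'
    scores, times = load_from_dict('results.pickle',
                                   varnames=['scores', 'times'],
                                   out_type='list')
    return scores, times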
def plot_eb(x, y, yerr, ax=None, alpha=0.3, color=None, line_args=dict(),
err_args=dict()):
"""
Parameters
----------
x : list | np.array()
y : list | np.array()
yerr : list | np.array() | float
ax
alpha
color
line_args
err_args
Returns
-------
ax
Adapted from http://tonysyu.github.io/plotting-error-bars.html#.VRE9msvmvEU
"""
ax = ax if ax is not None else plt.gca()
if color is None:
color = ax._get_lines.color_cycle.next()
if np.isscalar(yerr) or len(yerr) == len(y):
ymin = y - yerr
ymax = y + yerr
elif len(yerr) == 2:
ymin, ymax = yerr
ax.plot(x, y, color=color, **line_args)
ax.fill_between(x, ymax, ymin, alpha=alpha, color=color, **err_args)
return ax
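# Illustrative sketch, not part of the original module: plot_eb draws a line with
# a shaded error band. The sine curve and error value below are made up.
def _example_plot_eb():
    """Hypothetical usage drawing a curve with a +/- 0.2 shaded band."""
    import numpy as np
    x = np.linspace(0., 1., 100)
    y = np.sin(2 * np.pi * x)
    ax = plot_eb(x, y, yerr=0.2, color='b', alpha=0.2)
    ax.set_xlabel('Time (s)')
    return ax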
def fill_betweenx_discontinuous(ax, ymin, ymax, x, freq=1, **kwargs):
"""Fill betwwen x even if x is discontinuous clusters
Parameters
----------
ax : axis
x : list
Returns
-------
ax : axis
"""
x = np.array(x)
min_gap = (1.1 / freq)
while np.any(x):
# If with single time point
if len(x) > 1:
xmax = np.where((x[1:] - x[:-1]) > min_gap)[0]
else:
xmax = [0]
# If continuous
if not np.any(xmax):
xmax = [len(x) - 1]
ax.fill_betweenx((ymin, ymax), x[0], x[xmax[0]], **kwargs)
# remove from list
x = x[(xmax[0] + 1):]
return ax
def resample_epochs(epochs, sfreq):
"""faster resampling"""
# from librosa import resample
# librosa.resample(channel, o_sfreq, sfreq, res_type=res_type)
from scipy.signal import resample
# resample
    epochs._data = resample(epochs._data,
                            int(epochs._data.shape[2] /
                                epochs.info['sfreq'] * sfreq),
                            axis=2)
# update metadata
epochs.info['sfreq'] = sfreq
epochs.times = (np.arange(epochs._data.shape[2],
dtype=np.float) / sfreq + epochs.times[0])
return epochs
def decim(inst, decim):
"""faster resampling"""
from mne.io.base import _BaseRaw
from mne.epochs import _BaseEpochs
if isinstance(inst, _BaseRaw):
inst._data = inst._data[:, ::decim]
inst.info['sfreq'] /= decim
inst._first_samps /= decim
inst.first_samp /= decim
inst._last_samps /= decim
inst.last_samp /= decim
inst._raw_lengths /= decim
inst._times = inst._times[::decim]
elif isinstance(inst, _BaseEpochs):
inst._data = inst._data[:, :, ::decim]
inst.info['sfreq'] /= decim
inst.times = inst.times[::decim]
return inst
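# Illustrative sketch, not part of the original module: decim() drops every n-th
# sample in place as a cheap alternative to filtering and resampling. The epochs
# file name below is a made-up example.
def _example_decim():
    """Hypothetical usage: keep one sample out of four."""
    import mne
    epochs = mne.read_epochs('epochs-epo.fif')
    epochs = decim(epochs, 4)  # sfreq and times are updated accordingly
    return epochs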
def Evokeds_to_Epochs(inst, info=None, events=None):
"""Convert list of evoked into single epochs
Parameters
----------
inst: list
list of evoked objects.
info : dict
By default copy dict from inst[0]
events : np.array (dims: n, 3)
Returns
-------
epochs: epochs object"""
from mne.epochs import EpochsArray
from mne.evoked import Evoked
if (not(isinstance(inst, list)) or
not np.all([isinstance(x, Evoked) for x in inst])):
        raise ValueError('inst must be a list of Evoked objects')
# concatenate signals
data = [x.data for x in inst]
# extract meta data
if info is None:
info = inst[0].info
if events is None:
n = len(inst)
events = np.c_[np.cumsum(np.ones(n)) * info['sfreq'],
np.zeros(n), np.ones(n)]
return EpochsArray(data, info, events=events, tmin=inst[0].times.min())
class cluster_stat(dict):
""" Cluster statistics """
def __init__(self, epochs, alpha=0.05, **kwargs):
"""
Parameters
----------
X : np.array (dims = n * space * time)
data array
alpha : float
significance level
Can take spatio_temporal_cluster_1samp_test() parameters.
"""
from mne.stats import spatio_temporal_cluster_1samp_test
# Convert lists of evoked in Epochs
if isinstance(epochs, list):
epochs = Evokeds_to_Epochs(epochs)
X = epochs._data.transpose((0, 2, 1))
        # Apply contrast: observations x time x space
# Run stats
self.T_obs_, clusters, p_values, _ = \
spatio_temporal_cluster_1samp_test(X, out_type='mask', **kwargs)
# Save sorted sig clusters
inds = np.argsort(p_values)
clusters = np.array(clusters)[inds, :, :]
p_values = p_values[inds]
inds = np.where(p_values < alpha)[0]
self.sig_clusters_ = clusters[inds, :, :]
self.p_values_ = p_values[inds]
# By default, keep meta data from first epoch
self.epochs = epochs
self.times = self.epochs[0].times
self.info = self.epochs[0].info
self.ch_names = self.epochs[0].ch_names
return
def _get_mask(self, i_clu):
"""
        Select or combine clusters.
Parameters
----------
i_clu : int | list | array
cluster index. If list or array, returns average across multiple
clusters.
Returns
-------
mask : np.array
space_inds : np.array
times_inds : np.array
"""
# Select or combine clusters
if i_clu is None:
i_clu = range(len(self.sig_clusters_))
if isinstance(i_clu, int):
mask = self.sig_clusters_[i_clu]
else:
mask = np.sum(self.sig_clusters_[i_clu], axis=0)
        # unpack cluster information, get unique indices
space_inds = np.where(np.sum(mask, axis=0))[0]
time_inds = np.where(np.sum(mask, axis=1))[0]
return mask, space_inds, time_inds
def plot_topo(self, i_clu=None, pos=None, **kwargs):
"""
Plots fmap of one or several clusters.
Parameters
----------
i_clu : int
cluster index
Can take evoked.plot_topomap() parameters.
Returns
-------
fig
"""
from mne import find_layout
from mne.viz import plot_topomap
        # Channel positions (use the layout from info unless pos is given)
        if pos is None:
            pos = find_layout(self.info).pos
        # create topomap mask from sig cluster
        mask, space_inds, time_inds = self._get_mask(i_clu)
# plot average test statistic and mark significant sensors
topo = self.T_obs_[time_inds, :].mean(axis=0)
fig = plot_topomap(topo, pos, **kwargs)
return fig
def plot_topomap(self, i_clu=None, **kwargs):
"""
Plots effect topography and highlights significant selected clusters.
Parameters
----------
i_clu : int
cluster index
Can take evoked.plot_topomap() parameters.
Returns
-------
fig
"""
# create topomap mask from sig cluster
mask, space_inds, time_inds = self._get_mask(i_clu)
# plot average test statistic and mark significant sensors
evoked = self.epochs.average()
evoked.data = self.T_obs_.transpose()
fig = evoked.plot_topomap(mask=np.transpose(mask), **kwargs)
return fig
def plot(self, plot_type='butterfly', i_clus=None, axes=None, show=True,
**kwargs):
"""
Plots effect time course and highlights significant selected clusters.
Parameters
----------
i_clus : None | list | int
cluster indices
plot_type : str
'butterfly' to plot differential response across all channels
'cluster' to plot cluster time course for each condition
Can take evoked.plot() parameters.
Returns
-------
fig
"""
import matplotlib.pyplot as plt
from mne.viz.utils import COLORS
times = self.times * 1000
# if axes is None:
if True:
fig = plt.figure()
fig.add_subplot(111)
axes = fig.axes[0]
# By default, plot separate clusters
if i_clus is None:
if plot_type == 'butterfly':
i_clus = [None]
else:
i_clus = range(len(self.sig_clusters_))
elif isinstance(i_clus, int):
i_clus = [i_clus]
# Time course
if plot_type == 'butterfly':
# Plot butterfly of difference
evoked = self.epochs.average()
fig = evoked.plot(axes=axes, show=False, **kwargs)
# Significant times
ymin, ymax = axes.get_ylim()
for i_clu in i_clus:
_, _, time_inds = self._get_mask(i_clu)
sig_times = times[time_inds]
fill_betweenx_discontinuous(axes, ymin, ymax, sig_times,
freq=(self.info['sfreq'] / 1000),
color='orange', alpha=0.3)
axes.legend(loc='lower right')
axes.set_ylim(ymin, ymax)
# add information
axes.axvline(0, color='k', linestyle=':', label='stimulus onset')
axes.set_xlim([times[0], times[-1]])
            axes.set_xlabel('Time [ms]')
axes.set_ylabel('Evoked magnetic fields [fT]')
if show:
plt.show()
return fig
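# Illustrative sketch, not part of the original module: a hedged guess at how
# cluster_stat might be driven from a list of per-subject Evoked contrasts. The
# `evokeds` argument is assumed to exist; the permutation settings are arbitrary
# examples of spatio_temporal_cluster_1samp_test parameters.
def _example_cluster_stat(evokeds):
    """Hypothetical usage: run the cluster test, then plot the results."""
    stat = cluster_stat(evokeds, alpha=0.05, n_permutations=1024, n_jobs=-1)
    fig_time = stat.plot(plot_type='butterfly', show=False)
    fig_topo = stat.plot_topomap(i_clu=0, show=False)
    return fig_time, fig_topo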
|
gpl-3.0
|