repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
hsuantien/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
# Mock object for testing get_submatrix.
def __init__(self):
pass
def get_indices(self, i):
# Overridden to reproduce old get_submatrix test.
return (np.where([True, True, False, False, True])[0],
np.where([False, False, True, True])[0])
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
model = MockBiclustering()
for X in (data, csr_matrix(data), data.tolist()):
submatrix = model.get_submatrix(0, X)
if issparse(submatrix):
submatrix = submatrix.toarray()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.toarray()
assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
# Test Dhillon's Spectral CoClustering on a simple problem.
param_grid = {'svd_method': ['randomized', 'arpack'],
'n_svd_vecs': [None, 20],
'mini_batch': [False, True],
'init': ['k-means++'],
'n_init': [10],
'n_jobs': [1]}
random_state = 0
S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
random_state=random_state)
S -= S.min() # needs to be nonnegative before making it sparse
S = np.where(S < 1, 0, S) # threshold some values
for mat in (S, csr_matrix(S)):
for kwargs in ParameterGrid(param_grid):
model = SpectralCoclustering(n_clusters=3,
random_state=random_state,
**kwargs)
model.fit(mat)
assert_equal(model.rows_.shape, (3, 30))
assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def test_spectral_biclustering():
# Test Kluger methods on a checkerboard dataset.
S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
random_state=0)
non_default_params = {'method': ['scale', 'log'],
'svd_method': ['arpack'],
'n_svd_vecs': [20],
'mini_batch': [True]}
for mat in (S, csr_matrix(S)):
for param_name, param_values in non_default_params.items():
for param_value in param_values:
model = SpectralBiclustering(
n_clusters=3,
n_init=3,
init='k-means++',
random_state=0,
)
model.set_params(**dict([(param_name, param_value)]))
if issparse(mat) and model.get_params().get('method') == 'log':
# cannot take log of sparse matrix
assert_raises(ValueError, model.fit, mat)
continue
else:
model.fit(mat)
assert_equal(model.rows_.shape, (9, 30))
assert_equal(model.columns_.shape, (9, 30))
assert_array_equal(model.rows_.sum(axis=0),
np.repeat(3, 30))
assert_array_equal(model.columns_.sum(axis=0),
np.repeat(3, 30))
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
_test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
"""Check that rows and columns sum to the same constant."""
_do_scale_test(scaled)
assert_almost_equal(scaled.sum(axis=0).mean(),
scaled.sum(axis=1).mean(),
decimal=1)
def test_scale_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled, _, _ = _scale_normalize(mat)
_do_scale_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_bistochastic_normalize():
generator = np.random.RandomState(0)
X = generator.rand(100, 100)
for mat in (X, csr_matrix(X)):
scaled = _bistochastic_normalize(mat)
_do_bistochastic_test(scaled)
if issparse(mat):
assert issparse(scaled)
def test_log_normalize():
# adding any constant to a log-scaled matrix should make it
# bistochastic
generator = np.random.RandomState(0)
mat = generator.rand(100, 100)
scaled = _log_normalize(mat) + 1
_do_bistochastic_test(scaled)
def test_fit_best_piecewise():
model = SpectralBiclustering(random_state=0)
vectors = np.array([[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[0, 1, 2, 3, 4, 5]])
best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
model = SpectralBiclustering(random_state=0)
data = np.array([[1, 1, 1],
[1, 1, 1],
[3, 6, 3],
[3, 6, 3]])
vectors = np.array([[1, 0],
[0, 1],
[0, 0]])
for mat in (data, csr_matrix(data)):
        labels = model._project_and_cluster(mat, vectors, n_clusters=2)
assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
raise SkipTest("This test is failing on the buildbot, but cannot"
" reproduce. Temporarily disabling it until it can be"
" reproduced and fixed.")
model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
random_state=0)
model.fit(S)
assert_equal(consensus_score(model.biclusters_,
(rows, cols)), 1)
def test_errors():
data = np.arange(25).reshape((5, 5))
model = SpectralBiclustering(n_clusters=(3, 3, 3))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters='abc')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_clusters=(3, 'abc'))
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(svd_method='unknown')
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_best=0)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering(n_components=3, n_best=4)
assert_raises(ValueError, model.fit, data)
model = SpectralBiclustering()
data = np.arange(27).reshape((3, 3, 3))
assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
bhargav/scikit-learn | examples/model_selection/plot_validation_curve.py | 141 | 1931 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
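# An illustrative addition (not part of the original example): the "sweet spot"
# described in the docstring can be read off the curve as the gamma that
# maximizes the mean cross-validation score computed above.
best_gamma = param_range[np.argmax(test_scores_mean)]
print("gamma with the best cross-validation score: %g" % best_gamma)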
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label="Training score",
color="darkorange", lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color="darkorange", lw=lw)
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
thientu/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw data to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
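# A minimal usage sketch, assuming MLCOMP_DATASETS_HOME points to a folder of
# unzipped MLComp archives and that a dataset named '20news-18828' exists there
# (both are assumptions; nothing is downloaded by this module):
#
#     news = load_mlcomp('20news-18828', set_='train')
#     print(news.target_names[:3])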
| bsd-3-clause |
Garrett-R/scikit-learn | sklearn/tree/tests/test_export.py | 37 | 2897 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
def test_graphviz_toy():
"""Check correctness of export_graphviz"""
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 3. 0.]\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 0. 3.]\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"feature0 <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 3. 0.]\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"gini = 0.0000\\nsamples = 3\\n" \
"value = [ 0. 3.]\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0)
contents1 = out.getvalue()
contents2 = "digraph Tree {\n" \
"0 [label=\"X[0] <= 0.0000\\ngini = 0.5\\n" \
"samples = 6\", shape=\"box\"] ;\n" \
"1 [label=\"(...)\", shape=\"box\"] ;\n" \
"0 -> 1 ;\n" \
"2 [label=\"(...)\", shape=\"box\"] ;\n" \
"0 -> 2 ;\n" \
"}"
assert_equal(contents1, contents2)
def test_graphviz_errors():
"""Check for errors of export_graphviz"""
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
humdings/pynance-legacy | pynance/portfolio/portfolio.py | 1 | 4347 | #The MIT License (MIT)
#
#Copyright (c) 2014 David Edwards
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import time
import pandas as pd
from .. yahoo_pynance import HistoricalData, Stock
from . risk import Risk
class Portfolio():
'''
A portfolio over a given time period.
params:
tickers: (list)list of security ticker symbols
start_date: (datetime obj/string) starting date
end_date: (datetime obj/string) ending date
interval: (string) data frequency, default is daily
benchmark: (string) a benchmark for market returns
'''
def __init__(self, tickers, start_date, end_date, interval='d', benchmark='SPY'):
self.tickers = tickers
self.start_date = start_date
self.end_date = end_date
self.benchmark = benchmark
self.__dict__[benchmark] = HistoricalData(
benchmark, start_date, end_date, interval=interval
)
for sym in self.tickers:
h = HistoricalData(sym, start_date, end_date, interval=interval)
self.__dict__[sym] = h
time.sleep(.001) # Yahoo has a call rate limit
def __getitem__(self, item):
return self.__dict__[item]
def _field(self, field):
return pd.DataFrame({i: self[i][field] for i in self.tickers})
def bm_prices(self, adj_close=True):
''' benchmark prices '''
bm = self[self.benchmark]
if adj_close:
return bm['Adj Close']
return bm.Close
def bm_returns(self, adj_close=True):
''' benchmark returns '''
prices = self.bm_prices(adj_close=adj_close)
return prices.pct_change().dropna()
@property
def prices(self):
return self._field('Adj Close')
@property
def close_prices(self):
return self._field('Close')
@property
def open_prices(self):
return self._field('Open')
@property
def highs(self):
return self._field('High')
@property
def lows(self):
return self._field('Low')
@property
def volumes(self):
return self._field('Volume')
@property
def quotes(self):
data = [Stock(ticker) for ticker in self.tickers]
return pd.DataFrame({i.symbol: i.all for i in data})
def returns(self, adj_close=True):
if adj_close:
prices = self.prices
else:
prices = self.close_prices
return prices.pct_change().dropna()
def vwaps(self, days=None, adj_close=True):
''' Volume weighted average prices '''
volume = self.volumes
if adj_close:
prices = self.prices
else:
prices = self.close_prices
if days is not None:
prices = prices.tail(days)
volume = volume.tail(days)
return (volume * prices).sum() / volume.sum()
def beta(self, adj_close=True, days=None):
R = Risk(self.returns(adj_close=adj_close))
Rm = self.bm_returns(adj_close=adj_close)
return R.beta(Rm)
def alpha(self, adj_close=True, rfr=.02):
R = Risk(self.returns(adj_close=adj_close))
Rm = self.bm_returns(adj_close=adj_close)
return R.alpha(Rm, rfr)
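# A minimal usage sketch (tickers and dates are placeholders, and the legacy
# Yahoo endpoints wrapped by yahoo_pynance may no longer respond):
#
#     p = Portfolio(['AAPL', 'MSFT'], '2014-01-01', '2014-06-30')
#     print(p.returns().head())
#     print(p.vwaps(days=20))
#     print(p.beta())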
| mit |
Cophy08/rodeo | rodeo/kernel.py | 8 | 7985 | # start compatibility with IPython Jupyter 4.0+
try:
from jupyter_client import BlockingKernelClient
except ImportError:
from IPython.kernel import BlockingKernelClient
# python3/python2 nonsense
try:
from Queue import Empty
except:
from queue import Empty
import atexit
import subprocess
import uuid
import time
import os
import sys
import json
__dirname = os.path.dirname(os.path.abspath(__file__))
vars_patch = """
import json
try:
import pandas as pd
except:
pd = None
def __get_variables():
    if not pd:
        print('[]')
        return
    variable_names = globals().keys()
data_frames = []
for v in variable_names:
if v.startswith("_"):
continue
if isinstance(globals()[v], pd.DataFrame):
data_frames.append({
"name": v,
"dtype": "DataFrame"
})
print(json.dumps(data_frames))
"""
class Kernel(object):
def __init__(self, active_dir, pyspark):
# kernel config is stored in a dot file with the active directory
config = os.path.join(active_dir, ".kernel-%s.json" % str(uuid.uuid4()))
# right now we're spawning a child process for IPython. we can
# probably work directly with the IPython kernel API, but the docs
# don't really explain how to do it.
log_file = None
if pyspark:
os.environ["IPYTHON_OPTS"] = "kernel -f %s" % config
pyspark = os.path.join(os.environ.get("SPARK_HOME"), "bin/pyspark")
spark_log = os.environ.get("SPARK_LOG", None)
if spark_log:
log_file = open(spark_log, "w")
spark_opts = os.environ.get("SPARK_OPTS", "")
args = [pyspark] + spark_opts.split() # $SPARK_HOME/bin/pyspark <SPARK_OPTS>
p = subprocess.Popen(args, stdout=log_file, stderr=log_file)
else:
args = [sys.executable, '-m', 'IPython', 'kernel', '-f', config]
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# when __this__ process exits, we're going to remove the ipython config
# file and kill the ipython subprocess
atexit.register(p.terminate)
def remove_config():
if os.path.isfile(config):
os.remove(config)
atexit.register(remove_config)
        # I found that connecting to the kernel immediately would sometimes fail,
        # so we'll wait until the config file exists before moving on
while os.path.isfile(config)==False:
time.sleep(0.1)
def close_file():
if log_file:
log_file.close()
atexit.register(close_file)
# fire up the kernel with the appropriate config
self.client = BlockingKernelClient(connection_file=config)
self.client.load_connection_file()
self.client.start_channels()
# load our monkeypatches...
self.client.execute("%matplotlib inline")
self.client.execute(vars_patch)
def _run_code(self, code, timeout=0.1):
# this function executes some code and waits for it to completely finish
        # before returning. I don't think that this is necessarily the best
# way to do this, but the IPython documentation isn't very helpful for
# this particular topic.
#
# 1) execute code and grab the ID for that execution thread
# 2) look for messages coming from the "iopub" channel (this is just a
# stream of output)
# 3) when we get a message that is one of the following, save relevant
# data to `data`:
# - execute_result - content from repr
# - stream - content from stdout
# - error - ansii encoded stacktrace
# the final piece is that we check for when the message indicates that
# the kernel is idle and the message's parent is the original execution
# ID (msg_id) that's associated with our executing code. if this is the
# case, we'll return the data and the msg_id and exit
msg_id = self.client.execute(code)
output = { "msg_id": msg_id, "output": None, "image": None, "error": None }
while True:
try:
reply = self.client.get_iopub_msg(timeout=timeout)
except Empty:
continue
if "execution_state" in reply['content']:
if reply['content']['execution_state']=="idle" and reply['parent_header']['msg_id']==msg_id:
if reply['parent_header']['msg_type']=="execute_request":
return output
elif reply['header']['msg_type']=="execute_result":
output['output'] = reply['content']['data'].get('text/plain', '')
elif reply['header']['msg_type']=="display_data":
output['image'] = reply['content']['data'].get('image/png', '')
elif reply['header']['msg_type']=="stream":
output['output'] = reply['content'].get('text', '')
elif reply['header']['msg_type']=="error":
output['error'] = "\n".join(reply['content']['traceback'])
def execute(self, code):
return self._run_code(code)
def complete(self, code, timeout=0.1):
# Call ipython kernel complete, wait for response with the correct msg_id,
# and construct appropriate UI payload.
# See below for an example response from ipython kernel completion for 'el'
#
# {
# 'parent_header':
# {u'username': u'ubuntu', u'version': u'5.0', u'msg_type': u'complete_request',
# u'msg_id': u'5222d158-ada8-474e-88d8-8907eb7cc74c', u'session': u'cda4a03d-a8a1-4e6c-acd0-de62d169772e',
# u'date': datetime.datetime(2015, 5, 7, 15, 25, 8, 796886)},
# 'msg_type': u'complete_reply',
# 'msg_id': u'a3a957d6-5865-4c6f-a0b2-9aa8da718b0d',
# 'content':
# {u'matches': [u'elif', u'else'], u'status': u'ok', u'cursor_start': 0, u'cursor_end': 2, u'metadata': {}},
# 'header':
# {u'username': u'ubuntu', u'version': u'5.0', u'msg_type': u'complete_reply',
# u'msg_id': u'a3a957d6-5865-4c6f-a0b2-9aa8da718b0d', u'session': u'f1491112-7234-4782-8601-b4fb2697a2f6',
# u'date': datetime.datetime(2015, 5, 7, 15, 25, 8, 803470)},
# 'buffers': [],
# 'metadata': {}
# }
#
msg_id = self.client.complete(code)
output = { "msg_id": msg_id, "output": None, "image": None, "error": None }
while True:
try:
reply = self.client.get_shell_msg(timeout=timeout)
except Empty:
continue
if "matches" in reply['content'] and reply['msg_type']=="complete_reply" and reply['parent_header']['msg_id']==msg_id:
results = []
for completion in reply['content']['matches']:
result = {
"value": completion,
"dtype": "---"
}
if "." in code:
result['text'] = ".".join(result['value'].split(".")[1:])
result["dtype"] = "function"
else:
# result['text'] = result['value'].replace(code, '', 1)
result['text'] = result['value']
result["dtype"] = "session variable" # type(globals().get(code)).__name__
results.append(result)
jsonresults = json.dumps(results)
output['output'] = jsonresults
return output
#else:
#Don't know what to do with the rest.
#I've observed parent_header msg_types: kernel_info_request, execute_request
#Just discard for now
def get_dataframes(self):
return self.execute("__get_variables()")
| bsd-2-clause |
kpespinosa/BuildingMachineLearningSystemsWithPython | ch11/demo_mds.py | 25 | 3724 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
import numpy as np
from matplotlib import pylab
from mpl_toolkits.mplot3d import Axes3D
from sklearn import linear_model, manifold, decomposition, datasets
logistic = linear_model.LogisticRegression()
from utils import CHART_DIR
np.random.seed(3)
# all examples will have three classes in this file
colors = ['r', 'g', 'b']
markers = ['o', 6, '*']
def plot_demo_1():
X = np.c_[np.ones(5), 2 * np.ones(5), 10 * np.ones(5)].T
y = np.array([0, 1, 2])
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
mds = manifold.MDS(n_components=3)
Xtrans = mds.fit_transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on example data set in 3 dimensions")
ax.view_init(10, -15)
mds = manifold.MDS(n_components=2)
Xtrans = mds.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on example data set in 2 dimensions")
filename = "mds_demo_1.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_iris_mds():
iris = datasets.load_iris()
X = iris.data
y = iris.target
# MDS
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
mds = manifold.MDS(n_components=3)
Xtrans = mds.fit_transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on Iris data set in 3 dimensions")
ax.view_init(10, -15)
mds = manifold.MDS(n_components=2)
Xtrans = mds.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("MDS on Iris data set in 2 dimensions")
filename = "mds_demo_iris.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
# PCA
fig = pylab.figure(figsize=(10, 4))
ax = fig.add_subplot(121, projection='3d')
ax.set_axis_bgcolor('white')
pca = decomposition.PCA(n_components=3)
Xtrans = pca.fit(X).transform(X)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], Xtrans[y == cl][:, 2], c=color, marker=marker, edgecolor='black')
pylab.title("PCA on Iris data set in 3 dimensions")
ax.view_init(50, -35)
pca = decomposition.PCA(n_components=2)
Xtrans = pca.fit_transform(X)
ax = fig.add_subplot(122)
for cl, color, marker in zip(np.unique(y), colors, markers):
ax.scatter(
Xtrans[y == cl][:, 0], Xtrans[y == cl][:, 1], c=color, marker=marker, edgecolor='black')
pylab.title("PCA on Iris data set in 2 dimensions")
filename = "pca_demo_iris.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_demo_1()
plot_iris_mds()
| mit |
bigdataelephants/scikit-learn | sklearn/utils/tests/test_utils.py | 23 | 6045 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
"""Check the check_random_state utility function behavior"""
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
"""Border case not worth mentioning in doctests"""
assert_true(resample() is None)
def test_deprecated():
"""Test whether the deprecated decorator issues appropriate warnings"""
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
"""Check that invalid arguments yield ValueError"""
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
| bsd-3-clause |
kevinhikali/ml_kevin | bottom/logistic_regression.py | 1 | 1151 | # -*- coding: utf-8 -*-
"""
@author: kevinhikali
@email: [email protected]
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from math import exp
# global variable
SampleTh = np.array([[2],
[5]])
# function
def h(th, x):
global SampleTh
return np.dot(th.transpose(), x)
nSample = 40
nParam = 2
SampleX = abs(np.random.rand(nParam, nSample))
# if bias required
SampleX[0, :] = np.ones(nSample)
BorderV = np.dot(SampleTh.transpose(), SampleX)[0]
SampleV = 4.5*np.ones(nSample) + 3.5*(np.random.rand(nSample)-0.5*np.ones(nSample))
SampleY = np.zeros(nSample)
for i in range(nSample):
if SampleV[i] > BorderV[i]:
SampleY[i] = 0
else:
SampleY[i] = 1
th = np.zeros(nParam)
x = np.zeros(nParam)
y = np.zeros(1)
fig = plt.figure()
plt_x = 0
for i in range(nSample):
plt_x = SampleX[1, i]
x = SampleX[:, i]
# origin
y = h(SampleTh, x)
plot, = plt.plot(plt_x, y, 'go')
# sample
y = SampleY[i]
plot, = plt.plot(plt_x, y, 'bo')
# trained
y = h(th, x)
plot, = plt.plot(plt_x, y, 'ro')
plt.show()
| gpl-3.0 |
maxlikely/scikit-learn | sklearn/__check_build/__init__.py | 8 | 1670 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError(
"""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
bigdataelephants/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 30 | 4516 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=================================================================
Model selection with Probabilistic (PCA) and Factor Analysis (FA)
=================================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is the different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
            label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
xubenben/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
   also known as the Forest Small Rice Rat, a rodent that lives in
   Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in
      Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
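# A minimal usage sketch (the first call downloads and caches the data; the
# shapes follow the docstring above):
#
#     data = fetch_species_distributions()
#     xgrid, ygrid = construct_grids(data)
#     print(data.coverages.shape)       # (14, 1592, 1212)
#     print(data.train['species'][:3])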
| bsd-3-clause |
zegnus/self-driving-car-machine-learning | p05-vehicle-detection/lesson_functions.py | 2 | 11796 | from classes import *
import matplotlib.image as mpimg
import numpy as np
import cv2
from skimage.feature import hog
def add_heat(heatmap, bbox_list):
# Iterate through list of bboxes
for box in bbox_list:
# Add += 1 for all pixels inside each bbox
# Assuming each "box" takes the form ((x1, y1), (x2, y2))
heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
# Return updated heatmap
    return heatmap
def apply_threshold(heatmap, threshold):
# Zero out pixels below the threshold
heatmap[heatmap <= threshold] = 0
# Return thresholded map
return heatmap
def draw_labeled_bboxes(img, labels):
# Iterate through all detected cars
for car_number in range(1, labels[1] + 1):
# Find pixels with each car_number label value
nonzero = (labels[0] == car_number).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
# Draw the box on the image
cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)
# Return the image
return img
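# A minimal sketch of how the three helpers above are typically chained; the
# label() import and the hot_windows list are assumptions, not part of this
# module:
#
#     from scipy.ndimage import label
#     heat = np.zeros_like(image[:, :, 0]).astype(float)
#     heat = add_heat(heat, hot_windows)     # windows a classifier marked "car"
#     heat = apply_threshold(heat, 2)        # drop isolated false positives
#     labels = label(heat)
#     annotated = draw_labeled_bboxes(np.copy(image), labels)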
# Define a function to return HOG features and visualization
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True):
# Call with two outputs if vis==True
if vis == True:
features, hog_image = hog(img, orientations=orient,
pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block),
transform_sqrt=True,
visualise=vis, feature_vector=feature_vec)
return features, hog_image
# Otherwise call with one output
else:
features = hog(img, orientations=orient,
pixels_per_cell=(pix_per_cell, pix_per_cell),
cells_per_block=(cell_per_block, cell_per_block),
transform_sqrt=True,
visualise=vis, feature_vector=feature_vec)
return features
# Define a function to compute binned color features
def bin_spatial(img, size=(32, 32)):
color1 = cv2.resize(img[:, :, 0], size).ravel()
color2 = cv2.resize(img[:, :, 1], size).ravel()
color3 = cv2.resize(img[:, :, 2], size).ravel()
return np.hstack((color1, color2, color3))
# Define a function to compute color histogram features
# NEED TO CHANGE bins_range if reading .png files with mpimg!
def color_hist(img, nbins=32, bins_range=(0, 256)):
if np.max(img) <= 1.0:
bins_range = (0, 1.0)
# Compute the histogram of the color channels separately
channel1_hist = np.histogram(img[:, :, 0], bins=nbins, range=bins_range)
channel2_hist = np.histogram(img[:, :, 1], bins=nbins, range=bins_range)
channel3_hist = np.histogram(img[:, :, 2], bins=nbins, range=bins_range)
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
# Return the individual histograms, bin_centers and feature vector
return hist_features
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def extract_features(fep: FeatureExtractionParameters, imgs):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for file in imgs:
# Read in each one by one
if file.endswith("png"):
image = cv2.imread(file) # reads in BGR
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
else:
image = mpimg.imread(file)
file_features = extract_features_from_image(fep, image)
features.append(file_features)
# horizontal flip
file_features = extract_features_from_image(fep, cv2.flip(image, 1))
features.append(file_features)
# Return list of feature vectors
return features
def convert_color(image, color_space='YCrCb'):
if color_space != 'RGB':
if color_space == 'HSV':
return cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif color_space == 'LUV':
return cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif color_space == 'HLS':
return cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif color_space == 'YUV':
return cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
elif color_space == 'YCrCb':
return cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
else:
return np.copy(image)
def extract_features_from_image(fep: FeatureExtractionParameters, image):
image = cv2.resize(image, (fep.shape[0], fep.shape[1]))
feature_image = convert_color(image, fep.color_space)
file_features = []
if fep.spatial_feat is True:
spatial_features = bin_spatial(feature_image, size=fep.spatial_size)
file_features.append(spatial_features)
if fep.hist_feat is True:
# Apply color_hist()
hist_features = color_hist(feature_image, nbins=fep.hist_bins)
file_features.append(hist_features)
if fep.hog_feat is True:
# Call get_hog_features() with vis=False, feature_vec=True
if fep.hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
hog_features.append(get_hog_features(feature_image[:, :, channel],
fep.orient, fep.pix_per_cell, fep.cell_per_block,
feature_vec=True))
hog_features = np.ravel(hog_features)
else:
hog_features = get_hog_features(feature_image[:, :, fep.hog_channel], fep.orient,
fep.pix_per_cell, fep.cell_per_block, feature_vec=True)
# Append the new feature vector to the features list
file_features.append(hog_features)
return np.concatenate(file_features)
# Define a function that takes an image,
# start and stop positions in both x and y,
# window size (x and y dimensions),
# and overlap fraction (for both x and y)
def slide_window(img, boxes=[], x_start_stop=[None, None], y_start_stop=[None, None],
xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
# If x and/or y start/stop positions not defined, set to image size
if x_start_stop[0] == None:
x_start_stop[0] = 0
if x_start_stop[1] == None:
x_start_stop[1] = img.shape[1]
if y_start_stop[0] == None:
y_start_stop[0] = 0
if y_start_stop[1] == None:
y_start_stop[1] = img.shape[0]
# Compute the span of the region to be searched
xspan = x_start_stop[1] - x_start_stop[0]
yspan = y_start_stop[1] - y_start_stop[0]
# Compute the number of pixels per step in x/y
nx_pix_per_step = np.int(xy_window[0] * (1 - xy_overlap[0]))
ny_pix_per_step = np.int(xy_window[1] * (1 - xy_overlap[1]))
# Compute the number of windows in x/y
nx_buffer = np.int(xy_window[0] * (xy_overlap[0]))
ny_buffer = np.int(xy_window[1] * (xy_overlap[1]))
nx_windows = np.int((xspan - nx_buffer) / nx_pix_per_step)
ny_windows = np.int((yspan - ny_buffer) / ny_pix_per_step)
# Initialize a list to append window positions to
# Loop through finding x and y window positions
# Note: you could vectorize this step, but in practice
# you'll be considering windows one by one with your
# classifier, so looping makes sense
for ys in range(ny_windows):
for xs in range(nx_windows):
# Calculate window position
startx = xs * nx_pix_per_step + x_start_stop[0]
endx = startx + xy_window[0]
starty = ys * ny_pix_per_step + y_start_stop[0]
endy = starty + xy_window[1]
# Append window position to list
boxes.append(((startx, starty), (endx, endy)))
# Return the list of windows
return boxes
# Define a function to draw bounding boxes
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):
draw_img = np.copy(img)
img = img.astype(np.float32) / 255
img_tosearch = img[ystart:ystop, :, :]
    ctrans_tosearch = convert_color(img_tosearch, color_space='YCrCb')
if scale != 1:
imshape = ctrans_tosearch.shape
ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1] / scale), np.int(imshape[0] / scale)))
ch1 = ctrans_tosearch[:, :, 0]
ch2 = ctrans_tosearch[:, :, 1]
ch3 = ctrans_tosearch[:, :, 2]
# Define blocks and steps as above
nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
nfeat_per_block = orient * cell_per_block ** 2
    # 64 was the original sampling rate, with 8 cells and 8 pix per cell; a 128-pixel window is used here
window = 128
nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1
cells_per_step = 2 # Instead of overlap, define how many cells to step
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
nysteps = (nyblocks - nblocks_per_window) // cells_per_step
# Compute individual channel HOG features for the entire image
hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb * cells_per_step
xpos = xb * cells_per_step
# Extract HOG for this patch
hog_feat1 = hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
hog_feat2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
hog_feat3 = hog3[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
xleft = xpos * pix_per_cell
ytop = ypos * pix_per_cell
# Extract the image patch
subimg = cv2.resize(ctrans_tosearch[ytop:ytop + window, xleft:xleft + window], (64, 64))
# Get color features
spatial_features = bin_spatial(subimg, size=spatial_size)
hist_features = color_hist(subimg, nbins=hist_bins)
# Scale features and make a prediction
test_features = X_scaler.transform(
np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
# test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1))
test_prediction = svc.predict(test_features)
if test_prediction == 1:
xbox_left = np.int(xleft * scale)
ytop_draw = np.int(ytop * scale)
win_draw = np.int(window * scale)
cv2.rectangle(draw_img, (xbox_left, ytop_draw + ystart),
(xbox_left + win_draw, ytop_draw + win_draw + ystart), (0, 0, 255), 6)
return draw_img
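# Usage sketch for find_cars (all values illustrative; svc and X_scaler are assumed
# to come from an earlier training step, e.g. a LinearSVC and a fitted StandardScaler):
#   out_img = find_cars(image, ystart=400, ystop=656, scale=1.5,
#                       svc=svc, X_scaler=X_scaler, orient=9,
#                       pix_per_cell=8, cell_per_block=2,
#                       spatial_size=(32, 32), hist_bins=32)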
| mit |
dmytroKarataiev/MachineLearning | capstone/forward_fill.py | 1 | 2022 | """Fill missing values"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
def fill_missing_values(df_data):
"""Fill missing values in data frame, in place."""
##########################################################
# TODO: Your code here (DO NOT modify anything else)
##########################################################
df_data.fillna(method="ffill", inplace=True)
df_data.fillna(method="bfill", inplace=True)
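# A minimal illustration of the fill order used above (hypothetical values,
# not part of the original exercise):
#   s = pd.Series([np.nan, 1.0, np.nan, 3.0, np.nan])
#   s.fillna(method="ffill").fillna(method="bfill")
#   -> [1.0, 1.0, 1.0, 3.0, 3.0]  (interior/trailing gaps forward-filled,
#      the leading NaN back-filled afterwards)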
def symbol_to_path(symbol, base_dir="data"):
"""Return CSV file path given ticker symbol."""
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates):
"""Read stock data (adjusted close) for given symbols from CSV files."""
df_final = pd.DataFrame(index=dates)
if "SPY" not in symbols: # add SPY for reference, if absent
symbols.insert(0, "SPY")
for symbol in symbols:
file_path = symbol_to_path(symbol)
df_temp = pd.read_csv(file_path, parse_dates=True, index_col="Date",
usecols=["Date", "Adj Close"], na_values=["nan"])
df_temp = df_temp.rename(columns={"Adj Close": symbol})
df_final = df_final.join(df_temp)
if symbol == "SPY": # drop dates SPY did not trade
df_final = df_final.dropna(subset=["SPY"])
return df_final
def plot_data(df_data):
"""Plot stock data with appropriate axis labels."""
ax = df_data.plot(title="Stock Data", fontsize=2)
ax.set_xlabel("Date")
ax.set_ylabel("Price")
plt.show()
def test_run():
"""Function called by Test Run."""
# Read data
symbol_list = ["JAVA", "FAKE1", "FAKE2"] # list of symbols
start_date = "2005-12-31"
end_date = "2014-12-07"
dates = pd.date_range(start_date, end_date) # date range as index
df_data = get_data(symbol_list, dates) # get data for each symbol
# Fill missing values
fill_missing_values(df_data)
# Plot
plot_data(df_data)
if __name__ == "__main__":
test_run()
| mit |
schets/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
bartosh/zipline | zipline/utils/calendars/exchange_calendar_nyse.py | 6 | 5082 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
from itertools import chain
from pandas.tseries.holiday import (
GoodFriday,
USLaborDay,
USPresidentsDay,
USThanksgivingDay,
)
from pytz import timezone
from .trading_calendar import TradingCalendar, HolidayCalendar
from .us_holidays import (
USNewYearsDay,
USMartinLutherKingJrAfter1998,
USMemorialDay,
USIndependenceDay,
Christmas,
MonTuesThursBeforeIndependenceDay,
FridayAfterIndependenceDayExcept2013,
USBlackFridayBefore1993,
USBlackFridayInOrAfter1993,
September11Closings,
HurricaneSandyClosings,
USNationalDaysofMourning,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
)
# Useful resources for making changes to this file:
# http://www.nyse.com/pdfs/closings.pdf
# http://www.stevemorse.org/jcal/whendid.html
class NYSEExchangeCalendar(TradingCalendar):
"""
Exchange calendar for NYSE
Open Time: 9:31 AM, US/Eastern
Close Time: 4:00 PM, US/Eastern
Regularly-Observed Holidays:
- New Years Day (observed on monday when Jan 1 is a Sunday)
- Martin Luther King Jr. Day (3rd Monday in January, only after 1998)
- Washington's Birthday (aka President's Day, 3rd Monday in February)
- Good Friday (two days before Easter Sunday)
- Memorial Day (last Monday in May)
- Independence Day (observed on the nearest weekday to July 4th)
- Labor Day (first Monday in September)
- Thanksgiving (fourth Thursday in November)
- Christmas (observed on nearest weekday to December 25)
NOTE: The NYSE does not observe the following US Federal Holidays:
- Columbus Day
- Veterans Day
Regularly-Observed Early Closes:
- July 3rd (Mondays, Tuesdays, and Thursdays, 1995 onward)
- July 5th (Fridays, 1995 onward, except 2013)
- Christmas Eve (except on Fridays, when the exchange is closed entirely)
- Day After Thanksgiving (aka Black Friday, observed from 1992 onward)
NOTE: Until 1993, the standard early close time for the NYSE was 2:00 PM.
From 1993 onward, it has been 1:00 PM.
Additional Irregularities:
- Closed from 9/11/2001 to 9/16/2001 due to terrorist attacks in NYC.
- Closed on 10/29/2012 and 10/30/2012 due to Hurricane Sandy.
- Closed on 4/27/1994 due to Richard Nixon's death.
- Closed on 6/11/2004 due to Ronald Reagan's death.
- Closed on 1/2/2007 due to Gerald Ford's death.
- Closed at 1:00 PM on Wednesday, July 3rd, 2013
- Closed at 1:00 PM on Friday, December 31, 1999
- Closed at 1:00 PM on Friday, December 26, 1997
- Closed at 1:00 PM on Friday, December 26, 2003
NOTE: The exchange was **not** closed early on Friday December 26, 2008,
nor was it closed on Friday December 26, 2014. The next Thursday Christmas
will be in 2025. If someone is still maintaining this code in 2025, then
we've done alright...and we should check if it's a half day.
"""
regular_early_close = time(13)
@property
def name(self):
return "NYSE"
@property
def tz(self):
return timezone('US/Eastern')
@property
def open_time(self):
return time(9, 31)
@property
def close_time(self):
return time(16)
@property
def regular_holidays(self):
return HolidayCalendar([
USNewYearsDay,
USMartinLutherKingJrAfter1998,
USPresidentsDay,
GoodFriday,
USMemorialDay,
USIndependenceDay,
USLaborDay,
USThanksgivingDay,
Christmas,
])
@property
def adhoc_holidays(self):
return list(chain(
September11Closings,
HurricaneSandyClosings,
USNationalDaysofMourning,
))
@property
def special_closes(self):
return [
(self.regular_early_close, HolidayCalendar([
MonTuesThursBeforeIndependenceDay,
FridayAfterIndependenceDayExcept2013,
USBlackFridayInOrAfter1993,
ChristmasEveInOrAfter1993
])),
(time(14), HolidayCalendar([
ChristmasEveBefore1993,
USBlackFridayBefore1993,
])),
]
@property
def special_closes_adhoc(self):
return [
(self.regular_early_close, [
'1997-12-26',
'1999-12-31',
'2003-12-26',
'2013-07-03'
])
]
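# Usage sketch (illustrative only; it relies solely on the properties defined above):
#   cal = NYSEExchangeCalendar()
#   cal.name                       # "NYSE"
#   cal.tz                         # US/Eastern timezone
#   cal.open_time, cal.close_time  # (time(9, 31), time(16, 0))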
| apache-2.0 |
btabibian/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example uses an Elastic-Net regression model, and the performance is
measured using the explained variance, a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the first 50 features actually impact the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
mattgiguere/scikit-learn | sklearn/utils/estimator_checks.py | 3 | 47556 | from __future__ import print_function
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning, NotFittedError
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_classifiers_pickle
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
# test if classifiers can cope with y.shape = (n_samples, 1)
yield check_classifiers_input_shapes
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_regressors_pickle
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
yield check_transformer_pickle
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
#yield check_parameters_default_constructible, name, Estimator
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
name = Estimator.__class__.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
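# Usage sketch for check_estimator (illustrative only; any sklearn-compatible
# estimator class could be passed, per the docstring above):
#   from sklearn.linear_model import LogisticRegression
#   check_estimator(LogisticRegression)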
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
    "An object that is convertible to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises(NotFittedError, transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_transformer_pickle(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
n_samples, n_features = X.shape
X = StandardScaler().fit_transform(X)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
if not hasattr(transformer, 'transform'):
return
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
random_state = np.random.RandomState(seed=12345)
y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit(X, y_).transform(X)
pickled_transformer = pickle.dumps(transformer)
unpickled_transformer = pickle.loads(pickled_transformer)
pickled_X_pred = unpickled_transformer.transform(X)
assert_array_almost_equal(pickled_X_pred, X_pred)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_unfitted(name, Estimator):
"""Check if NotFittedError is raised when calling predict and related
functions"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
assert_raises(NotFittedError, est.predict, X)
if hasattr(est, 'predict'):
assert_raises(NotFittedError, est.predict, X)
if hasattr(est, 'decision_function'):
assert_raises(NotFittedError, est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raises(NotFittedError, est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raises(NotFittedError, est.predict_log_proba, X)
def check_classifiers_input_shapes(name, Classifier):
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=1)
X = StandardScaler().fit_transform(X)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
set_random_state(classifier)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
classifier.fit(X, y[:, np.newaxis])
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
assert_equal(len(w), 1, msg)
assert_array_equal(y_pred, classifier.predict(X))
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_classifiers_pickle(name, Classifier):
X, y = make_blobs(random_state=0)
X, y = shuffle(X, y, random_state=7)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
pickled_classifier = pickle.dumps(classifier)
unpickled_classifier = pickle.loads(pickled_classifier)
pickled_y_pred = unpickled_classifier.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
regressor.predict(X)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
def check_regressors_pickle(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
regressor.fit(X, y_)
y_pred = regressor.predict(X)
# store old predictions
pickled_regressor = pickle.dumps(regressor)
unpickled_regressor = pickle.loads(pickled_regressor)
pickled_y_pred = unpickled_regressor.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_auto_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='auto')
classifier.fit(X_train, y_train)
y_pred_auto = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_auto, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_auto_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely too small
        # to reach convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='auto')
coef_auto = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
mean_weight = (1. / 3 + 1. / 2) / 2
class_weight = {
1: 1. / 3 / mean_weight,
-1: 1. / 2 / mean_weight,
}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_auto, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
if name == 'MiniBatchDictLearning' or name == 'MiniBatchSparsePCA':
# FIXME
# for MiniBatchDictLearning and MiniBatchSparsePCA
estimator.batch_size = 1
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(isinstance(estimator.set_params(), Estimator))
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
# Convert into a 2-D y for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
winklerand/pandas | pandas/io/api.py | 14 | 1146 | """
Data IO api
"""
# flake8: noqa
from pandas.io.parsers import read_csv, read_table, read_fwf
from pandas.io.clipboards import read_clipboard
from pandas.io.excel import ExcelFile, ExcelWriter, read_excel
from pandas.io.pytables import HDFStore, get_store, read_hdf
from pandas.io.json import read_json
from pandas.io.html import read_html
from pandas.io.sql import read_sql, read_sql_table, read_sql_query
from pandas.io.sas import read_sas
from pandas.io.feather_format import read_feather
from pandas.io.parquet import read_parquet
from pandas.io.stata import read_stata
from pandas.io.pickle import read_pickle, to_pickle
from pandas.io.packers import read_msgpack, to_msgpack
from pandas.io.gbq import read_gbq
# deprecation, xref #13790
def Term(*args, **kwargs):
import warnings
warnings.warn("pd.Term is deprecated as it is not "
"applicable to user code. Instead use in-line "
"string expressions in the where clause when "
"searching in HDFStore",
FutureWarning, stacklevel=2)
from pandas.io.pytables import Term
return Term(*args, **kwargs)
| bsd-3-clause |
muxiaobai/CourseExercises | python/kaggle/competition/house-price/house_price_advanced.py | 1 | 10632 |
# coding: utf-8
# # House Price Prediction Case Study (Advanced)
#
# This is the advanced version of the notebook. Its main goal is to compare several model frameworks, so the feature-engineering part up front is unchanged; the focus is on the model-building section later on.
#
# ## Step 1: Inspect the source dataset
# In[71]:
import numpy as np
import pandas as pd
# #### Read in the data
#
# * The index column of the source data is usually not useful on its own, so we use it as the index of our pandas DataFrame, which also makes lookups easier later on.
#
# * By convention, Kaggle puts the data in the *input* folder, so a tutorial like this follows the same convention.
# In[72]:
train_df = pd.read_csv('../input/train.csv', index_col=0)
test_df = pd.read_csv('../input/test.csv', index_col=0)
# #### Take a look at the source data
# In[73]:
train_df.head()
# At this point you should have a rough idea of which parts need some manual processing so that the source data becomes easier to work with.
# ## Step 2: Combine the data
#
# This simply makes preprocessing with a DataFrame more convenient. Once all the necessary preprocessing is done, we split the sets apart again.
#
# First, *SalePrice* is our training target, so it only appears in the training set and not in the test set (otherwise, what would we be testing?). We therefore pull the *SalePrice* column out first so that it does not get in the way.
#
# Let's first see what *SalePrice* looks like:
# In[74]:
get_ipython().magic('matplotlib inline')
prices = pd.DataFrame({"price":train_df["SalePrice"], "log(price + 1)":np.log1p(train_df["SalePrice"])})
prices.hist()
# As you can see, the label itself is not smooth. To help the model learn more accurately, we first "smooth" the label (make it closer to a normal distribution).
#
# Many people skip this step, which keeps their results below a certain standard.
#
# Here we use log1p, i.e. log(x + 1), which avoids problems with zero or negative values.
#
# Remember: if we smooth the data here, then when computing the final result we have to transform the smoothed predictions back.
#
# Following the "undo it the way you did it" principle, log1p() is paired with expm1(); likewise, log() is paired with exp(), etc.
# In[75]:
y_train = np.log1p(train_df.pop('SalePrice'))
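# (Added sketch, not in the original notebook:) a quick check of the
# "undo it the way you did it" rule above -- np.expm1 exactly inverts np.log1p,
# so transforming back reproduces the original SalePrice values.
print(np.expm1(y_train).head())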
# Then we concatenate the remaining parts
# In[76]:
all_df = pd.concat((train_df, test_df), axis=0)
# At this point, all_df is the combined DataFrame
# In[77]:
all_df.shape
# And *y_train* is the *SalePrice* column
# In[78]:
y_train.head()
# ## Step 3: Variable transformation
#
# This is essentially "feature engineering": unifying data that is inconvenient to handle or not in a consistent form.
#
# #### Fix the variable dtypes
#
# First, we notice that the values of *MSSubClass* should really be a category,
#
# but pandas cannot know that. In a DataFrame, numeric codes like these are stored as numbers by default.
#
# That is misleading, so we need to convert the column back to *string*.
# In[79]:
all_df['MSSubClass'].dtypes
# In[80]:
all_df['MSSubClass'] = all_df['MSSubClass'].astype(str)
# Once it is a *str*, a quick value count makes things clear
# In[81]:
all_df['MSSubClass'].value_counts()
# #### Convert categorical variables into a numerical representation
#
# When we represent categorical values numerically, note that numbers carry an ordering, so using them carelessly will confuse the model later on. Instead, we can use One-Hot encoding to represent the categories.
#
# pandas' built-in get_dummies method does One-Hot encoding for you in one call.
# In[82]:
pd.get_dummies(all_df['MSSubClass'], prefix='MSSubClass').head()
# Now *MSSubClass* has been split into 12 columns, one per category: 1 if the row belongs to it, 0 otherwise.
# Likewise, we One-Hot encode all of the categorical data
# In[83]:
all_dummy_df = pd.get_dummies(all_df)
all_dummy_df.head()
# #### Clean up the numerical variables
#
# Even the numerical variables still have a few small issues.
#
# For example, some values are missing:
# In[84]:
all_dummy_df.isnull().sum().sort_values(ascending=False).head(10)
# As you can see, the column with the most missing values is LotFrontage.
# Handling missing information requires reading the problem statement carefully. The dataset description usually states clearly what the missing values mean. If it really does not, you have to rely on your own judgement.
#
# Here, we fill the gaps with the column means.
# In[85]:
mean_cols = all_dummy_df.mean()
mean_cols.head(10)
# In[86]:
all_dummy_df = all_dummy_df.fillna(mean_cols)
# Check that there are no missing values left:
# In[87]:
all_dummy_df.isnull().sum().sum()
# #### Standardize the numerical data
#
# This step is not strictly necessary; it depends on which model you want to use. In general, regression models are sensitive to scale, so it is best to bring the source data into a standard distribution and avoid large differences between features.
#
# Of course, we do not need to standardize the One-Hot 0/1 columns. Our target should be the columns that were numerical to begin with:
#
# First, find out which columns are numerical:
# In[88]:
numeric_cols = all_df.columns[all_df.dtypes != 'object']
numeric_cols
# Compute the standard score: (X - mean) / std
#
# This makes the data points smoother and easier to work with.
#
# Note: we could also keep using the log transform here; this is just to show several different ways of "smoothing" the data.
# In[89]:
numeric_col_means = all_dummy_df.loc[:, numeric_cols].mean()
numeric_col_std = all_dummy_df.loc[:, numeric_cols].std()
all_dummy_df.loc[:, numeric_cols] = (all_dummy_df.loc[:, numeric_cols] - numeric_col_means) / numeric_col_std
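# (Added sketch, not in the original notebook:) sanity-check the standardization --
# after the transform the numeric columns should have mean ~0 and std ~1.
print(all_dummy_df.loc[:, numeric_cols].mean().abs().max())
print(all_dummy_df.loc[:, numeric_cols].std().mean())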
# ## Step 4: Build the models
#
# #### Split the data back into training/test sets
# In[90]:
dummy_train_df = all_dummy_df.loc[train_df.index]
dummy_test_df = all_dummy_df.loc[test_df.index]
# In[91]:
dummy_train_df.shape, dummy_test_df.shape
# In[92]:
X_train = dummy_train_df.values
X_test = dummy_test_df.values
# #### A bit of advanced ensembling
#
# In general, a single model only gets you so far. We tend to combine many models into one "ensemble" to reach the best result.
#
# From the earlier experiments we know that Ridge(alpha=15) gave us the best result
# In[93]:
from sklearn.linear_model import Ridge
ridge = Ridge(15)
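# (Added sketch, assuming the same CV setup as the rest of this notebook:) reproduce
# the standalone Ridge baseline referred to below (roughly 0.135 RMSE on the log
# scale), so the bagging/boosting numbers have a reference point.
from sklearn.model_selection import cross_val_score
baseline = np.sqrt(-cross_val_score(ridge, X_train, y_train, cv=10,
                                    scoring='neg_mean_squared_error')).mean()
print(baseline)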
# #### Bagging
#
# Bagging combines many small estimators, each trained on a random subset of the data, and then aggregates their outputs (majority vote for classification, averaging for regression).
#
# Scikit-learn already provides this framework, so we can call it directly:
# In[94]:
from sklearn.ensemble import BaggingRegressor
from sklearn.model_selection import cross_val_score
# Here we use the CV results to test how the number of estimators affects the final score.
#
# Note that when setting up Bagging, we pass our small estimator (ridge) via its base_estimator parameter
# In[95]:
params = [1, 10, 15, 20, 25, 30, 40]
test_scores = []
for param in params:
clf = BaggingRegressor(n_estimators=param, base_estimator=ridge)
test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=10, scoring='neg_mean_squared_error'))
test_scores.append(np.mean(test_score))
# In[96]:
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
plt.plot(params, test_scores)
plt.title("n_estimator vs CV Error");
# As you can see, in the previous version the best standalone ridge result was about 0.135; here, bagging 25 small ridge estimators gets below 0.132.
# Of course, if you have not tested the ridge model beforehand, you can also use Bagging's default DecisionTree base model:
#
# The code is the same; just drop the base_estimator argument
# In[106]:
params = [10, 15, 20, 25, 30, 40, 50, 60, 70, 100]
test_scores = []
for param in params:
clf = BaggingRegressor(n_estimators=param)
test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=10, scoring='neg_mean_squared_error'))
test_scores.append(np.mean(test_score))
# In[107]:
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
plt.plot(params, test_scores)
plt.title("n_estimator vs CV Error");
# Hmm, plain decision trees do not work so well here; the best result is only about 0.140.
# #### Boosting
#
# Boosting is, in theory, a step up from Bagging. It also gathers a collection of estimators, but arranges them sequentially: each estimator puts a higher weight on the samples the previous one handled poorly, so the next one learns those parts more "deeply".
# In[97]:
from sklearn.ensemble import AdaBoostRegressor
# In[98]:
params = [10, 15, 20, 25, 30, 35, 40, 45, 50]
test_scores = []
for param in params:
    clf = AdaBoostRegressor(n_estimators=param, base_estimator=ridge)
test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=10, scoring='neg_mean_squared_error'))
test_scores.append(np.mean(test_score))
# In[99]:
plt.plot(params, test_scores)
plt.title("n_estimator vs CV Error");
# With 25 small estimators, AdaBoost + Ridge also reaches roughly 0.132 here.
# Likewise, you can omit base_estimator and use AdaBoost's built-in decision tree.
# In[108]:
params = [10, 15, 20, 25, 30, 35, 40, 45, 50]
test_scores = []
for param in params:
    clf = AdaBoostRegressor(n_estimators=param)
test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=10, scoring='neg_mean_squared_error'))
test_scores.append(np.mean(test_score))
# In[109]:
plt.plot(params, test_scores)
plt.title("n_estimator vs CV Error");
# It looks like we would need to tune the decision tree model first before repeating this experiment. :P
# #### XGBoost
#
# Finally, let's look at XGBoost, nicknamed the "Kaggle secret weapon".
#
# It is still a Boosting-style model, but with a great many improvements.
# In[100]:
from xgboost import XGBRegressor
# Use scikit-learn's built-in cross-validation to evaluate the model
# In[101]:
params = [1,2,3,4,5,6]
test_scores = []
for param in params:
clf = XGBRegressor(max_depth=param)
test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=10, scoring='neg_mean_squared_error'))
test_scores.append(np.mean(test_score))
# Store all the CV values to see which max_depth works best (i.e., "parameter tuning")
# In[102]:
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
plt.plot(params, test_scores)
plt.title("max_depth vs CV Error");
# Impressive: at a depth of 5 the error drops to about 0.127.
# That is why everyone in the fast-moving competition scene is using XGBoost :)
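# (Added sketch, not in the original notebook:) a minimal example of producing final
# predictions. Remember the note from Step 2 -- undo the log1p transform with expm1.
# Averaging a bagged ridge with XGBoost, and the 'Id'/'SalePrice' submission columns,
# are illustrative assumptions rather than part of the course code.
br = BaggingRegressor(n_estimators=25, base_estimator=ridge)
xgb = XGBRegressor(max_depth=5)
br.fit(X_train, y_train)
xgb.fit(X_train, y_train)
y_final = np.expm1((br.predict(X_test) + xgb.predict(X_test)) / 2)
submission = pd.DataFrame({'Id': test_df.index, 'SalePrice': y_final})
print(submission.head())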
| gpl-2.0 |
mwaskom/lyman | lyman/tests/test_utils.py | 1 | 10091 | import os.path as op
import json
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
from nipype.interfaces.base import traits, TraitedSpec, Bunch
import pytest
from .. import utils
class TestLymanInterface(object):
def test_inheriting_interface_behavior(self):
class TestInterface(utils.LymanInterface):
class input_spec(TraitedSpec):
a = traits.Int()
b = traits.Int()
class output_spec(TraitedSpec):
c = traits.Int()
def _run_interface(self, runtime):
c = self.inputs.a + self.inputs.b
self._results["c"] = c
return runtime
a, b = 2, 3
c = a + b
ifc = TestInterface(a=a, b=b)
assert ifc._results == {}
res = ifc.run()
assert ifc._results == {"c": c}
assert res.outputs.c == c
def test_output_definition(self, execdir):
ifc = utils.LymanInterface()
field_name = "out_file"
file_name = "out_file.txt"
abspath_file_name = execdir.join(file_name)
out = ifc.define_output(field_name, file_name)
assert out == abspath_file_name
assert ifc._results == {field_name: abspath_file_name}
def test_write_image(self, execdir):
field_name = "out_file"
file_name = "out_file.nii"
abspath_file_name = execdir.join(file_name)
data = np.random.randn(12, 8, 4)
affine = np.eye(4)
img = nib.Nifti1Image(data, affine)
# Test writing with an image
ifc = utils.LymanInterface()
img_out = ifc.write_image(field_name, file_name, img)
assert ifc._results == {field_name: abspath_file_name}
assert isinstance(img_out, nib.Nifti1Image)
assert np.array_equal(img_out.get_fdata(), data)
assert np.array_equal(img_out.affine, affine)
# Test writing with data and affine
ifc = utils.LymanInterface()
img_out = ifc.write_image(field_name, file_name, data, affine)
assert ifc._results == {field_name: abspath_file_name}
assert isinstance(img_out, nib.Nifti1Image)
assert np.array_equal(img_out.get_fdata(), data)
assert np.array_equal(img_out.affine, affine)
def test_write_visualization(self, execdir):
        class Visualization(object):
            # Use a class attribute; "self" is not defined in a class body.
            closed = False
def savefig(self, fname, close):
with open(fname, "w"):
pass
if close:
self.closed = True
out_field = "test_file"
out_path = "test.png"
viz = Visualization()
ifc = utils.LymanInterface()
ifc.write_visualization(out_field, out_path, viz)
assert op.exists(out_path)
assert ifc._results == {out_field: op.join(execdir, out_path)}
assert viz.closed
out_field = "test_figure"
out_path = "test_figure.png"
f = plt.figure()
ifc = utils.LymanInterface()
ifc.write_visualization(out_field, out_path, f)
assert op.exists(out_path)
assert ifc._results == {out_field: op.join(execdir, out_path)}
viz = None
ifc.write_visualization(out_field, out_path, viz)
with pytest.raises(RuntimeError):
ifc.write_visualization(out_field, out_path, "bad type")
def test_submit_cmdline(self, execdir):
msg = "test"
runtime = Bunch(returncode=None,
cwd=str(execdir),
environ={"msg": msg})
ifc = utils.LymanInterface()
cmdline_a = ["echo", "$msg"]
runtime = ifc.submit_cmdline(runtime, cmdline_a)
stdout = "\n{}\n".format(msg + "\n")
assert runtime.stdout == stdout
stderr = "\n\n"
assert runtime.stderr == stderr
cmdline = "\n{}\n".format(" ".join(cmdline_a))
assert runtime.cmdline == cmdline
assert runtime.returncode == 0
with pytest.raises(RuntimeError):
ifc = utils.LymanInterface()
fname = "not_a_file"
cmdline_b = ["cat", fname]
runtime = ifc.submit_cmdline(runtime, cmdline_b)
stdout = stdout + "\n\n"
assert runtime.stdout == stdout
stderr = stderr + ("\ncat: {}: No such file or directory\n\n"
.format(fname))
assert runtime.stderr == stderr
cmdline = cmdline + "\n{}\n".format(" ".join(cmdline_b))
assert runtime.cmdline == cmdline
assert runtime.returncode == 1
class TestSaveInfo(object):
def test_save_info(self, execdir):
info = {"proc_dir": "../proc",
"data_dir": "../data"}
res = utils.SaveInfo(info_dict=info).run()
with open(res.outputs.info_file) as fid:
info_roundtrip = json.load(fid)
assert info == info_roundtrip
class TestImageMatrixConversion(object):
@pytest.fixture
def test_data(self):
seed = sum(map(ord, "image_to_matrix"))
rs = np.random.RandomState(seed)
vol_shape = 12, 8, 4
n_x, n_y, n_z = vol_shape
n_tp = 20
mask = np.arange(n_x * n_y * n_z).reshape(vol_shape) % 4
n_vox = mask.astype(bool).sum()
seg = rs.randint(0, 4, vol_shape)
data_4d = rs.normal(0, 1, (n_x, n_y, n_z, n_tp))
data_3d = data_4d[..., 0]
data_2d = data_4d[mask.astype(bool)].T
data_1d = data_3d[mask.astype(bool)].T
data_4d_masked = data_4d * mask.astype(bool)[:, :, :, None]
data_3d_masked = data_4d_masked[..., 0]
affine = np.array([[-2, 0, 0, 100],
[0, 1.9, -.6, 120],
[0, -.6, 1.9, -40],
[0, 0, 0, 1]])
img_4d = nib.Nifti1Image(data_4d, affine)
tr = 1.5
z_x, z_y, z_z, _ = img_4d.header.get_zooms()
img_4d.header.set_zooms((z_x, z_y, z_z, tr))
img_3d = nib.Nifti1Image(data_3d, affine)
mask_img = nib.Nifti1Image(mask, affine)
seg_img = nib.Nifti1Image(seg, affine)
test_data = dict(
mask=mask,
mask_img=mask_img,
seg=seg,
seg_img=seg_img,
affine=affine,
img_4d=img_4d,
img_3d=img_3d,
data_4d=data_4d,
data_4d_masked=data_4d_masked,
data_3d=data_3d,
data_3d_masked=data_3d_masked,
data_2d=data_2d,
data_1d=data_1d,
vol_shape=vol_shape,
n_vox=n_vox,
n_tp=n_tp,
tr=tr,
)
return test_data
def test_image_to_matrix(self, test_data):
img_4d = test_data["img_4d"]
img_3d = test_data["img_3d"]
mask_img = test_data["mask_img"]
seg_img = test_data["seg_img"]
# Test 4D image > 2D matrix with a mask
data_2d = utils.image_to_matrix(img_4d, mask_img)
assert np.array_equal(data_2d, test_data["data_2d"])
assert data_2d.shape == (test_data["n_tp"], test_data["n_vox"])
# Test 3D image > 1D matrix
data_1d = utils.image_to_matrix(img_3d, mask_img)
assert np.array_equal(data_1d, test_data["data_1d"])
assert data_1d.shape == (test_data["n_vox"],)
# Test segmentation value(s)
for use in [1, [1, 2]]:
data_seg = utils.image_to_matrix(img_4d, seg_img, use=use)
within_seg = np.isin(test_data["seg"], use).sum()
assert data_seg.shape == (test_data["n_tp"], within_seg)
def test_matrix_to_image(self, test_data):
data_2d = test_data["data_2d"]
data_1d = test_data["data_1d"]
mask_img = test_data["mask_img"]
seg_img = test_data["seg_img"]
n_x, n_y, n_z = test_data["vol_shape"]
n_tp = test_data["n_tp"]
# Test 2D matrix > 4D image
img_4d = utils.matrix_to_image(data_2d, mask_img)
assert np.array_equal(img_4d.get_fdata(), test_data["data_4d_masked"])
assert np.array_equal(img_4d.affine, mask_img.affine)
assert img_4d.shape == (n_x, n_y, n_z, n_tp)
# Test 1d matrix > 3D image
img_3d = utils.matrix_to_image(data_1d, mask_img)
assert np.array_equal(img_3d.get_fdata(), test_data["data_3d_masked"])
assert np.array_equal(img_3d.affine, mask_img.affine)
assert img_3d.shape == (n_x, n_y, n_z)
# Test segmentation value(s)
for use in [1, [1, 2]]:
within_seg = np.isin(test_data["seg"], use)
seg_mask_img = nib.Nifti1Image(within_seg.astype(np.uint8),
test_data["affine"])
data_seg = test_data["data_3d"][within_seg]
img_mask = utils.matrix_to_image(data_seg, seg_mask_img)
img_seg = utils.matrix_to_image(data_seg, seg_img, use=use)
assert np.array_equal(img_mask.get_fdata(), img_seg.get_fdata())
# Test affine and header from template image are used
img_template = test_data["img_4d"]
mask_img_nohdr = nib.Nifti1Image(test_data["mask"], np.eye(4))
img_4d = utils.matrix_to_image(data_2d, mask_img_nohdr, img_template)
assert np.array_equal(img_4d.affine, img_template.affine)
        assert img_4d.header == img_template.header
def test_check_mask(self):
data_shape = 12, 8, 4
x = np.random.normal(0, 1, data_shape)
mask_shape = data_shape[:-1]
mask_correct_3d = np.random.uniform(0, 1, data_shape) > .5
mask_correct_2d = np.random.uniform(0, 1, mask_shape) > .5
mask_bad_shape = np.random.uniform(0, 1, mask_shape).T > .5
mask_bad_dtype = np.random.uniform(0, 1, mask_shape)
utils.check_mask(mask_correct_3d, x)
utils.check_mask(mask_correct_2d, x)
with pytest.raises(ValueError):
utils.check_mask(mask_bad_shape, x)
with pytest.raises(TypeError):
utils.check_mask(mask_bad_dtype, x)
| bsd-3-clause |
looooo/panel-method | examples/plots/joukowsky_wing.py | 2 | 1228 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import parabem
from parabem.vtk_export import CaseToVTK
from parabem.pan3d import DirichletDoublet0Source0Case3 as Case
from parabem.airfoil import Airfoil
def rib3d(airfoil, y_pos):
out = [parabem.PanelVector3(coo[0], y_pos, coo[1]) for coo in airfoil.coordinates[:-1]]
out.append(out[0])
out[0].wake_vertex = True
return out
n_x = 50
n_y = 10
a = Airfoil.joukowsky(-0.01+1j)
a.numpoints = n_x
print(a.coordinates)
ribs = [rib3d(a, y) for y in np.linspace(-5, 5, n_y)]
panels = []
for i in range(n_y)[:-1]:
for j in range(n_x):
panels.append(parabem.Panel3([ribs[i][j], ribs[i + 1][j], ribs[i + 1][j + 1], ribs[i][j + 1]]))
te = [rib[0] for rib in ribs]
print(te)
case = Case(panels, te)
case.farfield = 5
case.v_inf = parabem.Vector3(1, 0, 0.0)
case.create_wake(length=10000, count=3) # length, count
case.run()
for i in range(n_y):
plt.plot(*zip(*[[pan.center.x, pan.cp] for pan in case.panels[i * n_x : (i+1) * n_x]]),
marker="x")
# plt.show()
plt.plot(*zip(*a.coordinates))
# plt.show()
vtk_writer = CaseToVTK(case, "results/joukowsky3_d")
vtk_writer.write_panels(data_type="point")
| gpl-3.0 |
ycaihua/scikit-learn | sklearn/metrics/regression.py | 27 | 9558 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array-like of shape = [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples, n_outputs]
Estimated target values.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
y_type = 'continuous' if y_true.shape[1] == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred
def _average_and_variance(values, sample_weight=None):
"""
Compute the (weighted) average and variance.
Parameters
----------
values : array-like of shape = [n_samples] or [n_samples, n_outputs]
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average : float
The weighted average
variance : float
The weighted variance
"""
values = np.asarray(values)
if values.ndim == 1:
values = values.reshape((-1, 1))
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
if sample_weight.ndim == 1:
sample_weight = sample_weight.reshape((-1, 1))
average = np.average(values, weights=sample_weight)
variance = np.average((values - average)**2, weights=sample_weight)
return average, variance
def mean_absolute_error(y_true, y_pred, sample_weight=None):
"""Mean absolute error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
return np.average(np.abs(y_pred - y_true).mean(axis=1),
weights=sample_weight)
def mean_squared_error(y_true, y_pred, sample_weight=None):
"""Mean squared error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
return np.average(((y_pred - y_true) ** 2).mean(axis=1),
weights=sample_weight)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred, sample_weight=None):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Parameters
----------
y_true : array-like
Ground truth (correct) target values.
y_pred : array-like
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
The explained variance.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
if y_type != "continuous":
raise ValueError("{0} is not supported".format(y_type))
_, numerator = _average_and_variance(y_true - y_pred, sample_weight)
_, denominator = _average_and_variance(y_true, sample_weight)
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
def r2_score(y_true, y_pred, sample_weight=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0, lower values are worse.
Parameters
----------
y_true : array-like of shape = [n_samples] or [n_samples, n_outputs]
Ground truth (correct) target values.
y_pred : array-like of shape = [n_samples] or [n_samples, n_outputs]
Estimated target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
The R^2 score.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred = _check_reg_targets(y_true, y_pred)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(dtype=np.float64)
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
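# Illustrative note (not part of the published scikit-learn source): "not a symmetric
# function" above means that swapping the arguments changes the score, because the
# denominator is the variance of the first argument only. With the docstring data:
#
#     r2_score([3, -0.5, 2, 7], [2.5, 0.0, 2, 8])   # ~0.948
#     r2_score([2.5, 0.0, 2, 8], [3, -0.5, 2, 7])   # ~0.957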
| bsd-3-clause |
bjigmp/incubator-spot | spot-setup/migration/migrate_old_dns_data.py | 7 | 11399 | #!/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import subprocess
import fnmatch
import re
import pandas as pd
import datetime
from utilities import util
old_oa_path=sys.argv[1]
staging_db=sys.argv[2]
hdfs_staging_path=sys.argv[3]
dest_db = sys.argv[4]
impala_daemon = sys.argv[5]
# Execution example:
#./migrate_old_dns_data.py '/home/spotuser/incubator-spot_old/spot-oa' 'spot_migration' '/user/spotuser/spot_migration/' 'migrated' 'node01'
def main():
log = util.get_logger('SPOT.MIGRATE.DNS')
cur_path = os.path.dirname(os.path.realpath(__file__))
new_spot_path = os.path.split(os.path.split(cur_path)[0])[0]
new_oa_path = '{0}/spot-oa'.format(new_spot_path)
log.info('New Spot OA path: {0}'.format(new_oa_path))
old_spot_path = os.path.split(old_oa_path)[0]
log.info("Creating HDFS paths for Impala tables")
util.create_hdfs_folder('{0}/dns/scores'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/dns/dendro'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/dns/edge'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/dns/summary'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/dns/storyboard'.format(hdfs_staging_path),log)
util.create_hdfs_folder('{0}/dns/threat_dendro'.format(hdfs_staging_path),log)
util.execute_cmd('hdfs dfs -setfacl -R -m user:impala:rwx {0}'.format(hdfs_staging_path),log)
log.info("Creating Staging tables in Impala")
util.execute_cmd('impala-shell -i {0} --var=hpath={1} --var=dbname={2} -c -f create_dns_migration_tables.hql'.format(impala_daemon, hdfs_staging_path, staging_db),log)
## dns Ingest Summary
log.info('Processing Dns Ingest Summary')
ing_sum_path='{0}/data/dns/ingest_summary/'.format(old_oa_path)
pattern='is_??????.csv'
staging_table_name = 'dns_ingest_summary_tmp'
dest_table_name = 'dns_ingest_summary'
if os.path.exists(ing_sum_path):
for file in fnmatch.filter(os.listdir(ing_sum_path), pattern):
log.info('Processing file: {0}'.format(file))
filepath='{0}{1}'.format(ing_sum_path, file)
df = pd.read_csv(filepath)
s = df.iloc[:,0]
l_dates = list(s.unique())
l_dates = map(lambda x: x[0:10].strip(), l_dates)
l_dates = filter(lambda x: re.match('\d{4}[-/]\d{2}[-/]\d{1}', x), l_dates)
s_dates = set(l_dates)
for date_str in s_dates:
dt = datetime.datetime.strptime(date_str, '%Y-%m-%d')
log.info('Processing day: {0} {1} {2} {3}'.format(date_str, dt.year, dt.month, dt.day))
records = df[df['date'].str.contains(date_str)]
filename = "ingest_summary_{0}{1}{2}.csv".format(dt.year, dt.month, dt.day)
records.to_csv(filename, index=False)
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT tdate, total FROM {5}.{6}".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
os.remove(filename)
## Iterating days
days_path='{0}/data/dns/'.format(old_oa_path)
if os.path.exists(days_path):
for day_folder in fnmatch.filter(os.listdir(days_path), '2*'):
print day_folder
dt = datetime.datetime.strptime(day_folder, '%Y%m%d')
log.info('Processing day: {0} {1} {2} {3}'.format(day_folder, dt.year, dt.month, dt.day))
full_day_path = '{0}{1}'.format(days_path,day_folder)
## dns Scores and dns Threat Investigation
filename = '{0}/dns_scores.csv'.format(full_day_path)
if os.path.isfile(filename):
log.info("Processing Dns Scores")
staging_table_name = 'dns_scores_tmp'
dest_table_name = 'dns_scores'
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT frame_time, unix_tstamp, frame_len, ip_dst, dns_qry_name, dns_qry_class, dns_qry_type, dns_qry_rcode, ml_score, tld, query_rep, hh, dns_qry_class_name, dns_qry_type_name, dns_qry_rcode_name, network_context FROM {5}.{6}".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
log.info("Processing dns Threat Investigation")
staging_table_name = 'dns_scores_tmp'
dest_table_name = 'dns_threat_investigation'
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT unix_tstamp, ip_dst, dns_qry_name, ip_sev, dns_sev FROM {5}.{6} WHERE ip_sev > 0 or dns_sev > 0;".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
# dns Dendro
log.info("Processing Dns Dendro")
staging_table_name = 'dns_dendro_tmp'
dest_table_name = 'dns_dendro'
pattern = 'dendro*.csv'
dendro_files = fnmatch.filter(os.listdir(full_day_path), pattern)
filename = '{0}/{1}'.format(full_day_path, pattern)
if len(dendro_files) > 0:
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT unix_timestamp('{5}', 'yyyyMMMdd'), dns_a, dns_qry_name, ip_dst FROM {6}.{7};".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, day_folder, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
## dns Edge
log.info("Processing Dns Edge")
staging_table_name = 'dns_edge_tmp'
dest_table_name = 'dns_edge'
pattern = 'edge*.csv'
edge_files = fnmatch.filter(os.listdir(full_day_path), pattern)
for file in edge_files:
parts = (re.findall("edge-(\S+).csv", file)[0]).split('_')
hh = int(parts[-2])
mn = int(parts[-1])
log.info("Processing File: {0} with HH: {1} and MN: {2}".format(file, hh, mn))
log.info("Removing double quotes File: {0}".format(file))
fixed_file = '{0}.fixed'.format(file)
sed_cmd = "sed 's/\"//g' {0}/{1} > {0}/{2}".format(full_day_path, file, fixed_file)
util.execute_cmd(sed_cmd, log)
filename = '{0}/{1}'.format(full_day_path, fixed_file)
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT unix_timestamp(frame_time, 'MMMMM dd yyyy H:mm:ss.SSS z'), frame_len, ip_dst, ip_src, dns_qry_name, '', '0', '0', dns_a, {5}, dns_qry_class, dns_qry_type, dns_qry_rcode, '0' FROM {6}.{7};".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, hh, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
os.remove(filename)
##dns_storyboard
log.info("Processing Dns Storyboard")
staging_table_name = 'dns_storyboard_tmp'
dest_table_name = 'dns_storyboard'
filename = '{0}/threats.csv'.format(full_day_path)
if os.path.isfile(filename):
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT ip_threat, dns_threat, title, text FROM {5}.{6};".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
# dns Threat Dendro
log.info("Processing Dns Threat Dendro")
staging_table_name = 'dns_threat_dendro_tmp'
dest_table_name = 'dns_threat_dendro'
pattern = 'threat-dendro*.csv'
threat_dendro_files = fnmatch.filter(os.listdir(full_day_path), pattern)
filename = '{0}/{1}'.format(full_day_path, pattern)
for file in threat_dendro_files:
ip = re.findall("threat-dendro-(\S+).csv", file)[0]
log.info("Processing File: {0} with IP:{1}".format(file, ip))
filename = '{0}/{1}'.format(full_day_path, file)
load_cmd = "LOAD DATA LOCAL INPATH '{0}' OVERWRITE INTO TABLE {1}.{2};".format(filename, staging_db, staging_table_name)
util.execute_hive_cmd(load_cmd, log)
insert_cmd = "INSERT INTO {0}.{1} PARTITION (y={2}, m={3}, d={4}) SELECT '{5}', total, dns_qry_name, ip_dst FROM {6}.{7} WHERE dns_qry_name is not null;".format(dest_db, dest_table_name, dt.year, dt.month, dt.day, ip, staging_db, staging_table_name)
util.execute_hive_cmd(insert_cmd, log)
log.info("Dropping staging tables")
util.execute_cmd('impala-shell -i {0} --var=dbname={1} -c -f drop_dns_migration_tables.hql'.format(impala_daemon, staging_db),log)
log.info("Removing staging tables' path in HDFS")
util.execute_cmd('hadoop fs -rm -r {0}/dns/'.format(hdfs_staging_path),log)
log.info("Moving CSV data to backup folder")
util.execute_cmd('mkdir {0}/data/backup/'.format(old_oa_path),log)
util.execute_cmd('cp -r {0}/data/dns/ {0}/data/backup/'.format(old_oa_path),log)
util.execute_cmd('rm -r {0}/data/dns/'.format(old_oa_path),log)
log.info("Invalidating metadata in Impala to refresh tables content")
util.execute_cmd('impala-shell -i {0} -q "INVALIDATE METADATA;"'.format(impala_daemon),log)
log.info("Creating ipynb template structure and copying advanced mode and threat investigation ipynb templates for each pre-existing day in the new Spot location")
ipynb_pipeline_path = '{0}/ipynb/dns/'.format(old_oa_path)
if os.path.exists(ipynb_pipeline_path):
for folder in os.listdir(ipynb_pipeline_path):
log.info("Creating ipynb dns folders in new Spot locaiton: {0}".format(folder))
util.execute_cmd('mkdir -p {0}/ipynb/dns/{1}/'.format(new_oa_path, folder),log)
log.info("Copying advanced mode ipynb template")
util.execute_cmd('cp {0}/oa/dns/ipynb_templates/Advanced_Mode_master.ipynb {0}/ipynb/dns/{1}/Advanced_Mode.ipynb'.format(new_oa_path, folder),log)
log.info("Copying threat investigation ipynb template")
util.execute_cmd('cp {0}/oa/dns/ipynb_templates/Threat_Investigation_master.ipynb {0}/ipynb/dns/{1}/Threat_Investigation.ipynb'.format(new_oa_path, folder),log)
if __name__=='__main__':
main()
| apache-2.0 |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/neighbors/tests/test_approximate.py | 1 | 19051 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| mit |
iismd17/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, accuracy of LSHForest queries are measured for various
hyper-parameters and index sizes.
Second, speed up of LSHForest queries compared to brute force
method in exact nearest neighbors is measures for the
aforementioned settings. In general, speed up is increasing as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
lhilt/scipy | scipy/stats/_binned_statistic.py | 1 | 27463 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import callable, xrange
from scipy._lib._numpy_compat import suppress_warnings
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for one or more sets of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a set of sequences - each the same shape as
`x`. If `values` is a set of sequences, the statistic will be computed
on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'std' : compute the standard deviation within each bin. This
is implicitly calculated with ddof=0.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for point within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
        ``bins[-1]``. If the bin edges are specified, the number of bins will
        be ``nx = len(bins) - 1``.
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber: 1-D ndarray of ints
Indices of the bins (corresponding to `bin_edges`) in which each value
of `x` belongs. Same length as `values`. A binnumber of `i` means the
corresponding value is between (bin_edges[i-1], bin_edges[i]).
See Also
--------
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First some basic examples:
Create two evenly spaced bins in the range of the given sample, and sum the
corresponding values in each of those bins:
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2]))
Multiple arrays of values can also be passed. The statistic is calculated
on each set independently:
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]),
array([1, 1, 1, 2, 2]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]),
array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, density=True, histtype='stepfilled',
... alpha=0.2, label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, binnumbers = binned_statistic_dd(
[x], values, statistic, bins, range)
return BinnedStatisticResult(medians, edges[0], binnumbers)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a bidimensional binned statistic for one or more sets of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (N,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'std' : compute the standard deviation within each bin. This
is implicitly calculated with ddof=0.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for point within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx = ny = bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edge = y_edge = bins),
* the bin edges in each dimension (x_edge, y_edge = bins).
        If the bin edges are specified, the number of bins will be
        ``nx = len(x_edge) - 1`` and ``ny = len(y_edge) - 1``.
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section.
.. versionadded:: 0.17.0
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin.
x_edge : (nx + 1) ndarray
The bin edges along the first dimension.
y_edge : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : (N,) array of ints or (2,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
Calculate the counts with explicit bin-edges:
>>> x = [0.1, 0.1, 0.1, 0.6]
>>> y = [2.1, 2.6, 2.1, 2.1]
>>> binx = [0.0, 0.5, 1.0]
>>> biny = [2.0, 2.5, 3.0]
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])
>>> ret.statistic
array([[ 2., 1.],
[ 1., 0.]])
The bin in which each sample is placed is given by the `binnumber`
returned parameter. By default, these are the linearized bin indices:
>>> ret.binnumber
array([5, 6, 5, 9])
The bin indices can also be expanded into separate entries for each
dimension using the `expand_binnumbers` parameter:
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],
... expand_binnumbers=True)
>>> ret.binnumber
array([[1, 1, 1, 2],
[1, 2, 1, 1]])
Which shows that the first three elements belong in the xbin 1, and the
fourth into xbin 2; and so on for y.
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, binnumbers = binned_statistic_dd(
[x, y], values, statistic, bins, range,
expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of N arrays of length D, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `sample`, or a list of sequences - each with the
same shape as `sample`. If `values` is such a list, the statistic
will be computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'std' : compute the standard deviation within each bin. This
is implicitly calculated with ddof=0.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for point within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.mplot3d import Axes3D
Take an array of 600 (x, y) coordinates as an example.
`binned_statistic_dd` can handle arrays of higher dimension `D`. But a plot
of dimension `D+1` is required.
>>> mu = np.array([0., 1.])
>>> sigma = np.array([[1., -0.5],[-0.5, 1.5]])
>>> multinormal = stats.multivariate_normal(mu, sigma)
>>> data = multinormal.rvs(size=600)
>>> data.shape
(600, 2)
Create bins and count how many arrays fall in each bin:
>>> N = 60
>>> x = np.linspace(-3, 3, N)
>>> y = np.linspace(-3, 4, N)
>>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y],
... statistic='count')
>>> bincounts = ret.statistic
Set the volume and the location of bars:
>>> dx = x[1] - x[0]
>>> dy = y[1] - y[0]
>>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2)
>>> z = 0
>>> bincounts = bincounts.ravel()
>>> x = x.ravel()
>>> y = y.ravel()
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> ax.bar3d(x, y, z, dx, dy, bincounts)
"""
    known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
    if statistic != 'count' and Vlen != Dlen:
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
smin = np.zeros(Ndim)
smax = np.zeros(Ndim)
for i in xrange(Ndim):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in xrange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in xrange(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into, in each dimension
sampBin = [
np.digitize(sample[:, i], edges[i])
for i in xrange(Ndim)
]
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in xrange(Ndim):
# Find the rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
binnumbers = np.ravel_multi_index(sampBin, nbin)
result = np.empty([Vdim, nbin.prod()], float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
#NOTE: take std dev by bin, np.std() is 2-pass and stable
result[vv, i] = np.std(values[vv, binnumbers == i])
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic == 'sum':
result.fill(0)
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.median(values[vv, binnumbers == i])
elif statistic == 'min':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.min(values[vv, binnumbers == i])
elif statistic == 'max':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.max(values[vv, binnumbers == i])
elif callable(statistic):
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
sup.filter(RuntimeWarning)
try:
null = statistic([])
except Exception:
null = np.nan
result.fill(null)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = statistic(values[vv, binnumbers == i])
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, nbin))
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = tuple([slice(None)] + Ndim * [slice(1, -1)])
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
    if expand_binnumbers and Ndim > 1:
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
    # Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers)
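# Illustrative sketch (not part of the original module): a minimal run of
# ``binned_statistic_dd`` on hypothetical random data, showing the shapes of
# the returned statistic and of the expanded bin numbers.
if __name__ == '__main__':
    _rng = np.random.RandomState(0)
    _demo_sample = _rng.rand(100, 2)     # 100 points in 2-D
    _demo_values = _rng.rand(100)        # one value per point
    _res = binned_statistic_dd(_demo_sample, _demo_values, statistic='mean',
                               bins=[4, 4], expand_binnumbers=True)
    print(_res.statistic.shape)          # (4, 4): one mean per 2-D bin
    print(_res.binnumber.shape)          # (2, 100): one row of bin indices per dimension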
| bsd-3-clause |
PeteW/luigi | examples/pyspark_wc.py | 17 | 3388 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
from luigi.contrib.s3 import S3Target
from luigi.contrib.spark import SparkSubmitTask, PySparkTask
class InlinePySparkWordCount(PySparkTask):
"""
This task runs a :py:class:`luigi.contrib.spark.PySparkTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.PySparkTask.main`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
# py-packages: numpy, pandas
"""
driver_memory = '2g'
executor_memory = '3g'
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
def main(self, sc, *args):
sc.textFile(self.input().path) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(self.output().path)
class PySparkWordCount(SparkSubmitTask):
"""
This task is the same as :py:class:`InlinePySparkWordCount` above but uses
an external python driver file specified in :py:meth:`app`
It runs a :py:class:`luigi.contrib.spark.SparkSubmitTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.SparkSubmitTask.run`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
deploy-mode: client
"""
driver_memory = '2g'
executor_memory = '3g'
total_executor_cores = luigi.IntParameter(default=100, significant=False)
name = "PySpark Word Count"
app = 'wordcount.py'
def app_options(self):
# These are passed to the Spark main args in the defined order.
return [self.input().path, self.output().path]
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
'''
// Corresponding example Spark Job, running Word count with Spark's Python API
// This file would have to be saved into wordcount.py
import sys
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext()
sc.textFile(sys.argv[1]) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(sys.argv[2])
'''
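# Illustrative sketch (not part of the original example): the tasks above are
# normally launched with the ``luigi`` command line, but they can also be
# scheduled from Python.  The call below assumes the [spark] configuration
# shown in the class docstrings is in place.
if __name__ == '__main__':
    luigi.build([InlinePySparkWordCount()], local_scheduler=True)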
| apache-2.0 |
nschaetti/EchoTorch | echotorch/utils/visualisation.py | 1 | 7213 | # -*- coding: utf-8 -*-
#
# Imports
import torch
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from echotorch.nn.PCACell import PCACell
from sklearn.decomposition import PCA
# Show singular values increasing aperture
def show_sv_for_increasing_aperture(conceptor, factor, title):
"""
Show singular values for increasing aperture
    :param conceptor:
:param factor:
:param title:
:return:
"""
# Fig
fig = plt.figure()
ax = fig.gca()
ax.set_xlim(0, 100)
ax.set_ylim(0, 1.5)
ax.grid(True)
# For each aperture multiplication
for i in range(5):
# Compute SVD
_, S, _ = torch.svd(conceptor.get_C())
# Plot
ax.plot(S.numpy(), '--')
# Multiply all conceptor's aperture by 10
conceptor.multiply_aperture(factor)
# end for
# Show
ax.set_xlabel(u"Singular values")
ax.set_title(title)
plt.show()
plt.close()
# end show_sv_for_increasing_aperture
# Show conceptors similarity matrix
def show_conceptors_similarity_matrix(conceptors, title):
"""
Show conceptors similarity matrix
:param conceptors:
:param title:
:return:
"""
# Labels
labels = list()
# Similarity matrix
sim_matrix = torch.zeros(len(conceptors), len(conceptors))
for i, ca in enumerate(conceptors):
labels.append(ca.name)
for j, cb in enumerate(conceptors):
sim_matrix[i, j] = ca.sim(cb)
# end for
# end for
show_similarity_matrix(sim_matrix, title, labels, labels)
# end conceptors_similarity_matrix
# Show similarity matrix
def show_similarity_matrix(sim_matrix, title, column_labels=None, row_labels=None):
"""
Show similarity matrix
:param sim_matrix:
:return:
"""
# Get cmap
cmap = plt.cm.get_cmap('Greens')
fig = plt.figure()
plt.title(title)
ax = fig.add_subplot(111)
cax = ax.matshow(sim_matrix, interpolation='nearest', cmap=cmap)
fig.colorbar(cax)
ax.set_xticks(np.arange(len(row_labels)))
ax.set_yticks(np.arange(len(column_labels)))
ax.set_xticklabels(row_labels, rotation=90)
ax.set_yticklabels(column_labels)
plt.show()
# end show_similarity_matrix
# Plot singular values
def plot_singular_values(stats, title, xmin, xmax, ymin, ymax, log=False):
"""
Plot singular values
:param stats:
:param title:
    :param xmin:
    :param xmax:
    :param ymin:
    :param ymax:
    :param log:
    :return:
"""
# Compute R (correlation matrix)
R = stats.t().mm(stats) / stats.shape[0]
# Compute singular values
U, S, V = torch.svd(R)
singular_values = S
# Compute singular values
if log:
singular_values = np.log10(singular_values)
# end if
# Fig
fig = plt.figure()
ax = fig.gca()
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.grid(True)
# For each plot
ax.plot(singular_values.numpy(), '--o')
ax.set_xlabel("Timesteps")
ax.set_title(title)
plt.show()
plt.close()
return singular_values, U
# end plot_singular_values
# Display neurons activities on a 3D plot
def neurons_activities_3d(stats, neurons, title, timesteps=-1, start=0):
"""
Display neurons activities on a 3D plot
:param stats:
:param neurons:
:param title:
:param timesteps:
:param start:
:return:
"""
# Fig
ax = plt.axes(projection='3d')
# Two by two
n_neurons = neurons.shape[0]
stats = stats[:, neurons].view(-1, n_neurons // 3, 3)
# Plot
if timesteps == -1:
time_length = stats.shape[0]
ax.plot3D(stats[:, :, 0].view(time_length).numpy(), stats[:, :, 1].view(time_length).numpy(), stats[:, :, 2].view(time_length).numpy(), 'o')
else:
ax.plot3D(stats[start:start + timesteps, :, 0].numpy(), stats[start:start + timesteps, :, 1].numpy(), stats[start:start + timesteps, :, 2].numpy(), 'o', lw=0.5)
# end if
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_zlabel("Z Axis")
ax.set_title(title)
plt.show()
plt.close()
# end neurons_activities_3d
# Display neurons activities on a 2D plot
def neurons_activities_2d(stats, neurons, title, colors, timesteps=-1, start=0):
"""
Display neurons activities on a 2D plot
:param stats:
:param neurons:
:param title:
:param timesteps:
:param start:
:return:
"""
# Fig
fig = plt.figure()
ax = fig.gca()
# Two by two
n_neurons = neurons.shape[0]
# For each plot
for i, stat in enumerate(stats):
# Stats
stat = stat[:, neurons].view(-1, n_neurons // 2, 2)
# Plot
if timesteps == -1:
ax.plot(stat[:, :, 0].numpy(), stat[:, :, 1].numpy(), colors[i])
else:
ax.plot(stat[start:start + timesteps, :, 0].numpy(), stat[start:start + timesteps, :, 1].numpy(), colors[i])
# end if
# end for
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_title(title)
plt.show()
plt.close()
# end neurons_activities_2d
# Display neurons activities
def neurons_activities_1d(stats, neurons, title, colors, xmin, xmax, ymin, ymax, timesteps=-1, start=0):
"""
Display neurons activities
:param stats:
:param neurons:
:return:
"""
# Fig
fig = plt.figure()
ax = fig.gca()
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.grid(True)
# For each neurons
for i, n in enumerate(neurons):
if timesteps == -1:
ax.plot(stats[:, n].numpy(), colors[i])
else:
ax.plot(stats[start:start + timesteps, n].numpy(), colors[i])
# end if
# end for
ax.set_xlabel("Timesteps")
ax.set_title(title)
plt.show()
plt.close()
# end neurons_activities_1d
# Show 3D time series
def show_3d_timeseries(ts, title):
"""
Show 3D timeseries
:param axis:
:param title:
:return:
"""
# Fig
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(ts[:, 0].numpy(), ts[:, 1].numpy(), ts[:, 2].numpy(), lw=0.5)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_zlabel("Z Axis")
ax.set_title(title)
plt.show()
plt.close()
# end show_3d_timeseries
# Show 2D time series
def show_2d_timeseries(ts, title):
"""
Show 2D timeseries
:param ts:
:param title:
:return:
"""
# Fig
fig = plt.figure()
ax = fig.gca()
ax.plot(ts[:, 0].numpy(), ts[:, 1].numpy(), lw=0.5)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_title(title)
plt.show()
plt.close()
# end show_2d_timeseries
# Show 1D time series
def show_1d_timeseries(ts, title, xmin, xmax, ymin, ymax, start=0, timesteps=-1):
"""
Show 1D time series
:param ts:
:param title:
:return:
"""
# Fig
fig = plt.figure()
ax = fig.gca()
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.grid(True)
if timesteps == -1:
ax.plot(ts[:, 0].numpy())
else:
ax.plot(ts[start:start+timesteps, 0].numpy())
# end if
ax.set_xlabel("X Axis")
ax.set_title(title)
plt.show()
plt.close()
# end show_1d_timeseries
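# Illustrative sketch (not part of the original module): plotting a small
# hypothetical similarity matrix with the helper defined above.  The 2x2
# tensor and the labels 'a'/'b' are made-up demo data.
if __name__ == '__main__':
    _demo_sim = torch.rand(2, 2)
    show_similarity_matrix(_demo_sim, "Demo similarity matrix",
                           column_labels=['a', 'b'], row_labels=['a', 'b'])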
| gpl-3.0 |
liuwenf/moose | modules/tensor_mechanics/test/tests/capped_mohr_coulomb/small_deform_5_6_7.py | 4 | 1376 | #!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected(fn, mi, ma):
f = open(fn, "r")
data = sorted([map(float, line.strip().split(",")[mi:ma]) for line in f.readlines()[1:]])
data = [d for d in data if d[0] >= d[1]]
return zip(*data)
def moose(fn, mi, ma):
f = open(fn, "r")
data = [map(float, line.strip().split(",")[mi:ma]) for line in f.readlines()[2:-1]]
return zip(*data)
plt.figure()
e5 = expected("gold/expected_small_deform_5.csv", 3, 5)
e6 = expected("gold/expected_small_deform_6.csv", 3, 5)
e7 = expected("gold/expected_small_deform_7.csv", 4, 6)
m5 = moose("gold/small_deform5.csv", 2, 4)
m6 = moose("gold/small_deform6.csv", 2, 4)
m7 = moose("gold/small_deform7.csv", 3, 5)
plt.plot(e5[1], e5[0], 'k-', linewidth = 3.0, label = 'expected (Smin=0)')
plt.plot(m5[1], m5[0], 'ks', label = 'MOOSE (Smin=0)')
plt.plot(e6[1], e6[0], 'b-', linewidth = 3.0, label = 'expected (Smid = Smin)')
plt.plot(m6[1], m6[0], 'b^', label = 'MOOSE (Smid approx Smin)')
plt.plot(e7[1], e7[0], 'r-', linewidth = 3.0, label = 'expected (Smax = Smid)')
plt.plot(m7[1], m7[0], 'ro', label = 'MOOSE (Smax = Smid)')
plt.xlim([0,1])
plt.legend(loc = 'lower left')
plt.xlabel("S_mid or S_min")
plt.ylabel("S_max")
plt.title("Tensile yield surface")
plt.savefig("figures/small_deform_5_6_7.eps")
sys.exit(0)
| lgpl-2.1 |
arokem/nipype | nipype/algorithms/rapidart.py | 9 | 30137 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The rapidart module provides routines for artifact detection and region of
interest analysis.
These functions include:
* ArtifactDetect: performs artifact detection on functional images
* StimulusCorrelation: determines correlation between stimuli
schedule and movement/intensity parameters
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
import os
from copy import deepcopy
from warnings import warn
from nibabel import load, funcs, Nifti1Image
import numpy as np
from scipy import signal
import scipy.io as sio
from nipype.external import six
from ..interfaces.base import (BaseInterface, traits, InputMultiPath,
OutputMultiPath, TraitedSpec, File,
BaseInterfaceInputSpec, isdefined)
from ..utils.filemanip import filename_to_list, save_json, split_filename
from ..utils.misc import find_indices
from .. import logging, config
iflogger = logging.getLogger('interface')
def _get_affine_matrix(params, source):
"""Return affine matrix given a set of translation and rotation parameters
params : np.array (upto 12 long) in native package format
source : the package that generated the parameters
supports SPM, AFNI, FSFAST, FSL, NIPY
"""
if source == 'FSL':
params = params[[3, 4, 5, 0, 1, 2]]
elif source in ('AFNI', 'FSFAST'):
params = params[np.asarray([4, 5, 3, 1, 2, 0]) + (len(params) > 6)]
params[3:] = params[3:] * np.pi / 180.
if source == 'NIPY':
# nipy does not store typical euler angles, use nipy to convert
from nipy.algorithms.registration import to_matrix44
return to_matrix44(params)
#process for FSL, SPM, AFNI and FSFAST
rotfunc = lambda x: np.array([[np.cos(x), np.sin(x)],
[-np.sin(x), np.cos(x)]])
q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0])
if len(params) < 12:
params = np.hstack((params, q[len(params):]))
params.shape = (len(params),)
# Translation
T = np.eye(4)
T[0:3, -1] = params[0:3]
# Rotation
Rx = np.eye(4)
Rx[1:3, 1:3] = rotfunc(params[3])
Ry = np.eye(4)
Ry[(0, 0, 2, 2), (0, 2, 0, 2)] = rotfunc(params[4]).ravel()
Rz = np.eye(4)
Rz[0:2, 0:2] = rotfunc(params[5])
# Scaling
S = np.eye(4)
S[0:3, 0:3] = np.diag(params[6:9])
# Shear
Sh = np.eye(4)
Sh[(0, 0, 1), (1, 2, 2)] = params[9:12]
if source in ('AFNI', 'FSFAST'):
return np.dot(T, np.dot(Ry, np.dot(Rx, np.dot(Rz, np.dot(S, Sh)))))
return np.dot(T, np.dot(Rx, np.dot(Ry, np.dot(Rz, np.dot(S, Sh)))))
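# Illustrative sketch (not part of the original module): building a 4x4 affine
# from a hypothetical 6-parameter SPM-style estimate (3 translations in mm
# followed by 3 rotations in radians).
def _example_get_affine_matrix():
    demo_params = np.array([1.0, -0.5, 2.0, 0.01, 0.0, -0.02])
    affine = _get_affine_matrix(demo_params, 'SPM')
    # The last column holds the translations; the upper-left 3x3 block holds
    # rotation/scale/shear.  Missing scale/shear terms default to identity.
    return affine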
def _calc_norm(mc, use_differences, source, brain_pts=None):
"""Calculates the maximum overall displacement of the midpoints
of the faces of a cube due to translation and rotation.
Parameters
----------
mc : motion parameter estimates
[3 translation, 3 rotation (radians)]
use_differences : boolean
brain_pts : [4 x n_points] of coordinates
Returns
-------
norm : at each time point
displacement : euclidean distance (mm) of displacement at each coordinate
"""
if brain_pts is None:
respos = np.diag([70, 70, 75])
resneg = np.diag([-70, -110, -45])
all_pts = np.vstack((np.hstack((respos, resneg)), np.ones((1, 6))))
displacement = None
else:
all_pts = brain_pts
n_pts = all_pts.size - all_pts.shape[1]
newpos = np.zeros((mc.shape[0], n_pts))
if brain_pts is not None:
displacement = np.zeros((mc.shape[0], n_pts / 3))
for i in range(mc.shape[0]):
affine = _get_affine_matrix(mc[i, :], source)
newpos[i, :] = np.dot(affine,
all_pts)[0:3, :].ravel()
if brain_pts is not None:
displacement[i, :] = \
np.sqrt(np.sum(np.power(np.reshape(newpos[i, :],
(3, all_pts.shape[1])) -
all_pts[0:3, :],
2),
axis=0))
# np.savez('displacement.npz', newpos=newpos, pts=all_pts)
normdata = np.zeros(mc.shape[0])
if use_differences:
newpos = np.concatenate((np.zeros((1, n_pts)),
np.diff(newpos, n=1, axis=0)), axis=0)
for i in range(newpos.shape[0]):
normdata[i] = \
np.max(np.sqrt(np.sum(np.reshape(np.power(np.abs(newpos[i, :]), 2),
(3, all_pts.shape[1])), axis=0)))
else:
newpos = np.abs(signal.detrend(newpos, axis=0, type='constant'))
normdata = np.sqrt(np.mean(np.power(newpos, 2), axis=1))
return normdata, displacement
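# Illustrative sketch (not part of the original module): computing the
# composite motion norm for a hypothetical 3-scan motion series (columns are
# 3 translations followed by 3 rotations, SPM convention).
def _example_calc_norm():
    demo_mc = np.array([[0.0, 0.0, 0.0, 0.00, 0.0, 0.0],
                        [1.0, 0.0, 0.0, 0.01, 0.0, 0.0],
                        [1.0, 0.5, 0.0, 0.01, 0.0, 0.0]])
    normval, displacement = _calc_norm(demo_mc, use_differences=True,
                                       source='SPM')
    # normval has one entry per scan; displacement is None because no
    # brain_pts were supplied.
    return normval, displacement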
def _nanmean(a, axis=None):
"""Return the mean excluding items that are nan
>>> a = [1, 2, np.nan]
>>> _nanmean(a)
1.5
"""
if axis:
return np.nansum(a, axis) / np.sum(1 - np.isnan(a), axis)
else:
return np.nansum(a) / np.sum(1 - np.isnan(a))
class ArtifactDetectInputSpec(BaseInterfaceInputSpec):
realigned_files = InputMultiPath(File(exists=True),
desc="Names of realigned functional data files",
mandatory=True)
realignment_parameters = InputMultiPath(File(exists=True), mandatory=True,
desc=("Names of realignment parameters"
"corresponding to the functional data files"))
parameter_source = traits.Enum("SPM", "FSL", "AFNI", "NiPy", "FSFAST",
desc="Source of movement parameters",
mandatory=True)
use_differences = traits.ListBool([True, False], minlen=2, maxlen=2,
usedefault=True,
                                      desc=("Use differences between successive motion (first element) "
                                            "and intensity parameter (second element) estimates in order "
                                            "to determine outliers. (default is [True, False])"))
use_norm = traits.Bool(True, requires=['norm_threshold'],
desc=("Uses a composite of the motion parameters in "
"order to determine outliers."),
usedefault=True)
norm_threshold = traits.Float(desc=("Threshold to use to detect motion-rela"
"ted outliers when composite motion is "
"being used"), mandatory=True,
xor=['rotation_threshold',
'translation_threshold'])
rotation_threshold = traits.Float(mandatory=True, xor=['norm_threshold'],
desc=("Threshold (in radians) to use to detect rotation-related "
"outliers"))
translation_threshold = traits.Float(mandatory=True, xor=['norm_threshold'],
desc=("Threshold (in mm) to use to detect translation-related "
"outliers"))
zintensity_threshold = traits.Float(mandatory=True,
desc=("Intensity Z-threshold use to detection images that deviate "
"from the mean"))
mask_type = traits.Enum('spm_global', 'file', 'thresh',
desc=("Type of mask that should be used to mask the functional "
"data. *spm_global* uses an spm_global like calculation to "
"determine the brain mask. *file* specifies a brain mask "
"file (should be an image file consisting of 0s and 1s). "
"*thresh* specifies a threshold to use. By default all voxels"
"are used, unless one of these mask types are defined."),
mandatory=True)
mask_file = File(exists=True,
desc="Mask file to be used if mask_type is 'file'.")
mask_threshold = traits.Float(desc=("Mask threshold to be used if mask_type"
" is 'thresh'."))
intersect_mask = traits.Bool(True,
desc=("Intersect the masks when computed from "
"spm_global."))
save_plot = traits.Bool(True, desc="save plots containing outliers",
usedefault=True)
plot_type = traits.Enum('png', 'svg', 'eps', 'pdf',
desc="file type of the outlier plot",
usedefault=True)
bound_by_brainmask = traits.Bool(False, desc=("use the brain mask to "
"determine bounding box"
"for composite norm (works"
"for SPM and Nipy - currently"
"inaccurate for FSL, AFNI"),
usedefault=True)
global_threshold = traits.Float(8.0, desc=("use this threshold when mask "
"type equal's spm_global"),
usedefault=True)
class ArtifactDetectOutputSpec(TraitedSpec):
outlier_files = OutputMultiPath(File(exists=True),
desc=("One file for each functional run containing a list of "
"0-based indices corresponding to outlier volumes"))
intensity_files = OutputMultiPath(File(exists=True),
desc=("One file for each functional run containing the global "
"intensity values determined from the brainmask"))
norm_files = OutputMultiPath(File,
desc=("One file for each functional run containing the composite "
"norm"))
statistic_files = OutputMultiPath(File(exists=True),
desc=("One file for each functional run containing information "
"about the different types of artifacts and if design info is"
" provided then details of stimulus correlated motion and a "
"listing or artifacts by event type."))
plot_files = OutputMultiPath(File,
desc=("One image file for each functional run containing the "
"detected outliers"))
mask_files = OutputMultiPath(File,
desc=("One image file for each functional run containing the mask"
"used for global signal calculation"))
displacement_files = OutputMultiPath(File,
desc=("One image file for each functional run containing the voxel"
"displacement timeseries"))
class ArtifactDetect(BaseInterface):
"""Detects outliers in a functional imaging series
Uses intensity and motion parameters to infer outliers. If `use_norm` is
True, it computes the movement of the center of each face a cuboid centered
around the head and returns the maximal movement across the centers.
Examples
--------
>>> ad = ArtifactDetect()
>>> ad.inputs.realigned_files = 'functional.nii'
>>> ad.inputs.realignment_parameters = 'functional.par'
>>> ad.inputs.parameter_source = 'FSL'
>>> ad.inputs.norm_threshold = 1
>>> ad.inputs.use_differences = [True, False]
>>> ad.inputs.zintensity_threshold = 3
>>> ad.run() # doctest: +SKIP
"""
input_spec = ArtifactDetectInputSpec
output_spec = ArtifactDetectOutputSpec
def __init__(self, **inputs):
super(ArtifactDetect, self).__init__(**inputs)
def _get_output_filenames(self, motionfile, output_dir):
"""Generate output files based on motion filenames
Parameters
----------
motionfile: file/string
Filename for motion parameter file
output_dir: string
output directory in which the files will be generated
"""
if isinstance(motionfile, six.string_types):
infile = motionfile
elif isinstance(motionfile, list):
infile = motionfile[0]
else:
raise Exception("Unknown type of file")
_, filename, ext = split_filename(infile)
artifactfile = os.path.join(output_dir, ''.join(('art.', filename,
'_outliers.txt')))
intensityfile = os.path.join(output_dir, ''.join(('global_intensity.',
filename, '.txt')))
statsfile = os.path.join(output_dir, ''.join(('stats.', filename,
'.txt')))
normfile = os.path.join(output_dir, ''.join(('norm.', filename,
'.txt')))
plotfile = os.path.join(output_dir, ''.join(('plot.', filename, '.',
self.inputs.plot_type)))
displacementfile = os.path.join(output_dir, ''.join(('disp.',
filename, ext)))
maskfile = os.path.join(output_dir, ''.join(('mask.', filename, ext)))
return (artifactfile, intensityfile, statsfile, normfile, plotfile,
displacementfile, maskfile)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['outlier_files'] = []
outputs['intensity_files'] = []
outputs['statistic_files'] = []
outputs['mask_files'] = []
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
outputs['norm_files'] = []
if self.inputs.bound_by_brainmask:
outputs['displacement_files'] = []
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
outputs['plot_files'] = []
for i, f in enumerate(filename_to_list(self.inputs.realigned_files)):
(outlierfile, intensityfile, statsfile, normfile, plotfile,
displacementfile, maskfile) = \
self._get_output_filenames(f, os.getcwd())
outputs['outlier_files'].insert(i, outlierfile)
outputs['intensity_files'].insert(i, intensityfile)
outputs['statistic_files'].insert(i, statsfile)
outputs['mask_files'].insert(i, maskfile)
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
outputs['norm_files'].insert(i, normfile)
if self.inputs.bound_by_brainmask:
outputs['displacement_files'].insert(i, displacementfile)
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
outputs['plot_files'].insert(i, plotfile)
return outputs
def _plot_outliers_with_wave(self, wave, outliers, name):
import matplotlib.pyplot as plt
plt.plot(wave)
plt.ylim([wave.min(), wave.max()])
plt.xlim([0, len(wave) - 1])
if len(outliers):
plt.plot(np.tile(outliers[:, None], (1, 2)).T,
np.tile([wave.min(), wave.max()], (len(outliers), 1)).T,
'r')
plt.xlabel('Scans - 0-based')
plt.ylabel(name)
def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None):
"""
Core routine for detecting outliers
"""
if not cwd:
cwd = os.getcwd()
# read in functional image
if isinstance(imgfile, six.string_types):
nim = load(imgfile)
elif isinstance(imgfile, list):
if len(imgfile) == 1:
nim = load(imgfile[0])
else:
images = [load(f) for f in imgfile]
nim = funcs.concat_images(images)
# compute global intensity signal
(x, y, z, timepoints) = nim.get_shape()
data = nim.get_data()
affine = nim.get_affine()
g = np.zeros((timepoints, 1))
masktype = self.inputs.mask_type
if masktype == 'spm_global': # spm_global like calculation
iflogger.debug('art: using spm global')
intersect_mask = self.inputs.intersect_mask
if intersect_mask:
mask = np.ones((x, y, z), dtype=bool)
for t0 in range(timepoints):
vol = data[:, :, :, t0]
# Use an SPM like approach
mask_tmp = vol > \
(_nanmean(vol) / self.inputs.global_threshold)
mask = mask * mask_tmp
for t0 in range(timepoints):
vol = data[:, :, :, t0]
g[t0] = _nanmean(vol[mask])
if len(find_indices(mask)) < (np.prod((x, y, z)) / 10):
intersect_mask = False
g = np.zeros((timepoints, 1))
if not intersect_mask:
iflogger.info('not intersect_mask is True')
mask = np.zeros((x, y, z, timepoints))
for t0 in range(timepoints):
vol = data[:, :, :, t0]
mask_tmp = vol > \
(_nanmean(vol) / self.inputs.global_threshold)
mask[:, :, :, t0] = mask_tmp
g[t0] = np.nansum(vol * mask_tmp)/np.nansum(mask_tmp)
elif masktype == 'file': # uses a mask image to determine intensity
maskimg = load(self.inputs.mask_file)
mask = maskimg.get_data()
affine = maskimg.get_affine()
mask = mask > 0.5
for t0 in range(timepoints):
vol = data[:, :, :, t0]
g[t0] = _nanmean(vol[mask])
elif masktype == 'thresh': # uses a fixed signal threshold
for t0 in range(timepoints):
vol = data[:, :, :, t0]
mask = vol > self.inputs.mask_threshold
g[t0] = _nanmean(vol[mask])
else:
mask = np.ones((x, y, z))
g = _nanmean(data[mask > 0, :], 1)
# compute normalized intensity values
gz = signal.detrend(g, axis=0) # detrend the signal
if self.inputs.use_differences[1]:
gz = np.concatenate((np.zeros((1, 1)), np.diff(gz, n=1, axis=0)),
axis=0)
gz = (gz - np.mean(gz)) / np.std(gz) # normalize the detrended signal
iidx = find_indices(abs(gz) > self.inputs.zintensity_threshold)
# read in motion parameters
mc_in = np.loadtxt(motionfile)
mc = deepcopy(mc_in)
(artifactfile, intensityfile, statsfile, normfile, plotfile,
displacementfile, maskfile) = self._get_output_filenames(imgfile, cwd)
mask_img = Nifti1Image(mask.astype(np.uint8), affine)
mask_img.to_filename(maskfile)
if self.inputs.use_norm:
brain_pts = None
if self.inputs.bound_by_brainmask:
voxel_coords = np.nonzero(mask)
coords = np.vstack((voxel_coords[0],
np.vstack((voxel_coords[1],
voxel_coords[2])))).T
brain_pts = np.dot(affine,
np.hstack((coords,
np.ones((coords.shape[0], 1)))).T)
# calculate the norm of the motion parameters
normval, displacement = _calc_norm(mc,
self.inputs.use_differences[0],
self.inputs.parameter_source,
brain_pts=brain_pts)
tidx = find_indices(normval > self.inputs.norm_threshold)
ridx = find_indices(normval < 0)
if displacement is not None:
dmap = np.zeros((x, y, z, timepoints), dtype=np.float)
for i in range(timepoints):
dmap[voxel_coords[0],
voxel_coords[1],
voxel_coords[2], i] = displacement[i, :]
dimg = Nifti1Image(dmap, affine)
dimg.to_filename(displacementfile)
else:
if self.inputs.use_differences[0]:
mc = np.concatenate((np.zeros((1, 6)),
np.diff(mc_in, n=1, axis=0)),
axis=0)
traval = mc[:, 0:3] # translation parameters (mm)
rotval = mc[:, 3:6] # rotation parameters (rad)
tidx = find_indices(np.sum(abs(traval) >
self.inputs.translation_threshold, 1)
> 0)
ridx = find_indices(np.sum(abs(rotval) >
self.inputs.rotation_threshold, 1) > 0)
outliers = np.unique(np.union1d(iidx, np.union1d(tidx, ridx)))
# write output to outputfile
np.savetxt(artifactfile, outliers, fmt='%d', delimiter=' ')
np.savetxt(intensityfile, g, fmt='%.2f', delimiter=' ')
if self.inputs.use_norm:
np.savetxt(normfile, normval, fmt='%.4f', delimiter=' ')
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
import matplotlib
matplotlib.use(config.get("execution", "matplotlib_backend"))
import matplotlib.pyplot as plt
fig = plt.figure()
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
plt.subplot(211)
else:
plt.subplot(311)
self._plot_outliers_with_wave(gz, iidx, 'Intensity')
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
plt.subplot(212)
self._plot_outliers_with_wave(normval, np.union1d(tidx, ridx),
'Norm (mm)')
else:
diff = ''
if self.inputs.use_differences[0]:
diff = 'diff'
plt.subplot(312)
self._plot_outliers_with_wave(traval, tidx,
'Translation (mm)' + diff)
plt.subplot(313)
self._plot_outliers_with_wave(rotval, ridx,
'Rotation (rad)' + diff)
plt.savefig(plotfile)
plt.close(fig)
motion_outliers = np.union1d(tidx, ridx)
stats = [{'motion_file': motionfile,
'functional_file': imgfile},
{'common_outliers': len(np.intersect1d(iidx, motion_outliers)),
'intensity_outliers': len(np.setdiff1d(iidx,
motion_outliers)),
'motion_outliers': len(np.setdiff1d(motion_outliers, iidx)),
},
{'motion': [{'using differences': self.inputs.use_differences[0]},
{'mean': np.mean(mc_in, axis=0).tolist(),
'min': np.min(mc_in, axis=0).tolist(),
'max': np.max(mc_in, axis=0).tolist(),
'std': np.std(mc_in, axis=0).tolist()},
]},
{'intensity': [{'using differences': self.inputs.use_differences[1]},
{'mean': np.mean(gz, axis=0).tolist(),
'min': np.min(gz, axis=0).tolist(),
'max': np.max(gz, axis=0).tolist(),
'std': np.std(gz, axis=0).tolist()},
]},
]
if self.inputs.use_norm:
stats.insert(3, {'motion_norm':
{'mean': np.mean(normval, axis=0).tolist(),
'min': np.min(normval, axis=0).tolist(),
'max': np.max(normval, axis=0).tolist(),
'std': np.std(normval, axis=0).tolist(),
}})
save_json(statsfile, stats)
def _run_interface(self, runtime):
"""Execute this module.
"""
funcfilelist = filename_to_list(self.inputs.realigned_files)
motparamlist = filename_to_list(self.inputs.realignment_parameters)
for i, imgf in enumerate(funcfilelist):
self._detect_outliers_core(imgf, motparamlist[i], i,
cwd=os.getcwd())
return runtime
class StimCorrInputSpec(BaseInterfaceInputSpec):
realignment_parameters = InputMultiPath(File(exists=True), mandatory=True,
desc=('Names of realignment parameters corresponding to the functional '
'data files'))
intensity_values = InputMultiPath(File(exists=True), mandatory=True,
desc='Name of file containing intensity values')
spm_mat_file = File(exists=True, mandatory=True,
desc='SPM mat file (use pre-estimate SPM.mat file)')
concatenated_design = traits.Bool(mandatory=True,
desc='state if the design matrix contains concatenated sessions')
class StimCorrOutputSpec(TraitedSpec):
stimcorr_files = OutputMultiPath(File(exists=True),
desc='List of files containing correlation values')
class StimulusCorrelation(BaseInterface):
"""Determines if stimuli are correlated with motion or intensity
parameters.
Currently this class supports an SPM generated design matrix and requires
intensity parameters. This implies that one must run
:ref:`ArtifactDetect <nipype.algorithms.rapidart.ArtifactDetect>`
and :ref:`Level1Design <nipype.interfaces.spm.model.Level1Design>` prior to running this or
provide an SPM.mat file and intensity parameters through some other means.
Examples
--------
>>> sc = StimulusCorrelation()
>>> sc.inputs.realignment_parameters = 'functional.par'
>>> sc.inputs.intensity_values = 'functional.rms'
>>> sc.inputs.spm_mat_file = 'SPM.mat'
>>> sc.inputs.concatenated_design = False
>>> sc.run() # doctest: +SKIP
"""
input_spec = StimCorrInputSpec
output_spec = StimCorrOutputSpec
def _get_output_filenames(self, motionfile, output_dir):
"""Generate output files based on motion filenames
Parameters
----------
motionfile: file/string
Filename for motion parameter file
output_dir: string
output directory in which the files will be generated
"""
(_, filename) = os.path.split(motionfile)
(filename, _) = os.path.splitext(filename)
corrfile = os.path.join(output_dir, ''.join(('qa.', filename,
'_stimcorr.txt')))
return corrfile
def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cwd=None):
"""
Core routine for determining stimulus correlation
"""
if not cwd:
cwd = os.getcwd()
# read in motion parameters
mc_in = np.loadtxt(motionfile)
g_in = np.loadtxt(intensityfile)
g_in.shape = g_in.shape[0], 1
dcol = designmatrix.shape[1]
mccol = mc_in.shape[1]
concat_matrix = np.hstack((np.hstack((designmatrix, mc_in)), g_in))
cm = np.corrcoef(concat_matrix, rowvar=0)
corrfile = self._get_output_filenames(motionfile, cwd)
# write output to outputfile
file = open(corrfile, 'w')
file.write("Stats for:\n")
file.write("Stimulus correlated motion:\n%s\n" % motionfile)
for i in range(dcol):
file.write("SCM.%d:" % i)
for v in cm[i, dcol + np.arange(mccol)]:
file.write(" %.2f" % v)
file.write('\n')
file.write("Stimulus correlated intensity:\n%s\n" % intensityfile)
for i in range(dcol):
file.write("SCI.%d: %.2f\n" % (i, cm[i, -1]))
file.close()
def _get_spm_submatrix(self, spmmat, sessidx, rows=None):
"""
Parameters
----------
spmmat: scipy matlab object
full SPM.mat file loaded into a scipy object
sessidx: int
index to session that needs to be extracted.
"""
designmatrix = spmmat['SPM'][0][0].xX[0][0].X
U = spmmat['SPM'][0][0].Sess[0][sessidx].U[0]
if rows is None:
rows = spmmat['SPM'][0][0].Sess[0][sessidx].row[0] - 1
cols = spmmat['SPM'][0][0].Sess[0][sessidx].col[0][range(len(U))] - 1
outmatrix = designmatrix.take(rows.tolist(), axis=0).take(cols.tolist(),
axis=1)
return outmatrix
def _run_interface(self, runtime):
"""Execute this module.
"""
motparamlist = self.inputs.realignment_parameters
intensityfiles = self.inputs.intensity_values
spmmat = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False)
nrows = []
for i in range(len(motparamlist)):
sessidx = i
rows = None
if self.inputs.concatenated_design:
sessidx = 0
mc_in = np.loadtxt(motparamlist[i])
rows = np.sum(nrows) + np.arange(mc_in.shape[0])
nrows.append(mc_in.shape[0])
matrix = self._get_spm_submatrix(spmmat, sessidx, rows)
self._stimcorr_core(motparamlist[i], intensityfiles[i],
matrix, os.getcwd())
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
files = []
for i, f in enumerate(self.inputs.realignment_parameters):
files.insert(i, self._get_output_filenames(f, os.getcwd()))
if files:
outputs['stimcorr_files'] = files
return outputs
| bsd-3-clause |
381426068/MissionPlanner | Lib/site-packages/scipy/signal/waveforms.py | 55 | 11609 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
def sawtooth(t, width=1):
"""
Return a periodic sawtooth waveform.
The sawtooth waveform has a period 2*pi, rises from -1 to 1 on the
interval 0 to width*2*pi and drops from 1 to -1 on the interval
width*2*pi to 2*pi. `width` must be in the interval [0,1].
Parameters
----------
t : array_like
Time.
width : float, optional
Width of the waveform. Default is 1.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(0, 20*np.pi, 500)
    >>> plt.plot(x, signal.sawtooth(x))
"""
t,w = asarray(t), asarray(width)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1-mask1) & (tmod < w*2*pi)
tsub = extract(mask2,tmod)
wsub = extract(mask2,w)
place(y,mask2,tsub / (pi*wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1-mask1) & (1-mask2)
tsub = extract(mask3,tmod)
wsub = extract(mask3,w)
place(y,mask3, (pi*(wsub+1)-tsub)/(pi*(1-wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period 2*pi, has value +1 from 0 to 2*pi*duty
and -1 from 2*pi*duty to 2*pi. `duty` must be in the interval [0,1].
Parameters
----------
t : array_like
The input time array.
duty : float, optional
        Duty cycle. Default is 0.5.
Returns
-------
y : array_like
The output square wave.
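    Examples
    --------
    A minimal usage sketch in the style of the `sawtooth` example above
    (the frequency and duty values are illustrative; plotting assumes
    matplotlib is available):

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import signal
    >>> t = np.linspace(0, 1, 500, endpoint=False)
    >>> plt.plot(t, signal.square(2*np.pi*5*t, duty=0.3))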
"""
t,w = asarray(t), asarray(duty)
w = asarray(w + (t-t))
t = asarray(t + (w-w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape,ytype)
    # duty must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y,mask1,nan)
# take t modulo 2*pi
tmod = mod(t,2*pi)
# on the interval 0 to duty*2*pi function is
# 1
mask2 = (1-mask1) & (tmod < w*2*pi)
place(y,mask2,1)
    # on the interval duty*2*pi to 2*pi the function is
    #  -1
mask3 = (1-mask1) & (1-mask2)
place(y,mask3,-1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False, retenv=False):
"""
Return a gaussian modulated sinusoid: exp(-a t^2) exp(1j*2*pi*fc*t).
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray, or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (Hz).
Default is 0.5.
    bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
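    Examples
    --------
    A minimal usage sketch requesting the in-phase, quadrature and envelope
    outputs (the time range is illustrative; plotting assumes matplotlib is
    available):

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import signal
    >>> t = np.linspace(-1, 1, 2001) * 1e-2
    >>> i, q, e = signal.gausspulse(t, fc=1000, retquad=True, retenv=True)
    >>> plt.plot(t, i, t, q, t, e)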
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi*fc*bw)**2 / (4.0*log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref)/a)
yenv = exp(-a*t*t)
yI = yenv * cos(2*pi*fc*t)
yQ = yenv * sin(2*pi*fc*t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per time unit';
there is no assumption here that the time unit is one second. The
important distinction is that the units of rotation are cycles, not
radians.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
f0 : float
Frequency (in Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (in Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2*pi*f(t)``.
``f(t)`` is defined below.
See Also
--------
scipy.signal.waveforms.sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f1 must be positive, and f0 must be greater than f1.
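    Examples
    --------
    A brief sketch of a linear sweep from 6 Hz down to 1 Hz over 10 seconds
    (the frequencies and sampling are illustrative; plotting assumes
    matplotlib is available):

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import signal
    >>> t = np.linspace(0, 10, 5001)
    >>> w = signal.chirp(t, f0=6, f1=1, t1=10, method='linear')
    >>> plt.plot(t, w)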
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
Calculate the phase used by chirp_phase to generate its output.
See `chirp_phase` for a description of the arguments.
"""
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2*pi * (f0*t + 0.5*beta*t*t)
elif method in ['quadratic','quad','q']:
beta = (f1 - f0)/(t1**2)
if vertex_zero:
phase = 2*pi * (f0*t + beta * t**3/3)
else:
phase = 2*pi * (f1*t + beta * ((t1 - t)**3 - t1**3)/3)
elif method in ['logarithmic', 'log', 'lo']:
if f0*f1 <= 0.0:
raise ValueError("For a geometric chirp, f0 and f1 must be nonzero " \
"and have the same sign.")
if f0 == f1:
phase = 2*pi * f0 * t
else:
beta = t1 / log(f1/f0)
phase = 2*pi * beta * f0 * (pow(f1/f0, t/t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f1 <= 0.0 or f0 <= f1:
raise ValueError("hyperbolic chirp requires f0 > f1 > 0.0.")
c = f1*t1
df = f0 - f1
phase = 2*pi * (f0 * c / df) * log((df*t + c)/c)
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic', "
"or 'hyperbolic', but a value of %r was given." % method)
return phase
def sweep_poly(t, poly, phi=0):
"""Frequency-swept cosine generator, with a time-dependent frequency
specified as a polynomial.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1D ndarray (or array-like), or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees. Default is 0.
Returns
-------
A numpy array containing the signal evaluated at 't' with the requested
time-varying frequency. More precisely, the function returns
``cos(phase + (pi/180)*phi)``
where `phase` is the integral (from 0 to t) of ``2 * pi * f(t)``;
``f(t)`` is defined above.
See Also
--------
scipy.signal.waveforms.chirp
Notes
-----
.. versionadded:: 0.8.0
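    Examples
    --------
    A minimal sketch with the instantaneous frequency given by the
    (illustrative) quadratic f(t) = 0.025*t**2 - 0.36*t + 1.25; plotting
    assumes matplotlib is available:

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy import signal
    >>> p = np.poly1d([0.025, -0.36, 1.25])
    >>> t = np.linspace(0, 10, 5001)
    >>> plt.plot(t, signal.sweep_poly(t, p))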
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2*pi * polyval(intpoly, t)
return phase
| gpl-3.0 |
Hbl15/ThinkStats2 | code/chap13soln.py | 68 | 2961 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import thinkplot
import thinkstats2
import survival
def CleanData(resp):
"""Cleans respondent data.
resp: DataFrame
"""
resp.cmdivorcx.replace([9998, 9999], np.nan, inplace=True)
resp['notdivorced'] = resp.cmdivorcx.isnull().astype(int)
resp['duration'] = (resp.cmdivorcx - resp.cmmarrhx) / 12.0
resp['durationsofar'] = (resp.cmintvw - resp.cmmarrhx) / 12.0
month0 = pandas.to_datetime('1899-12-15')
dates = [month0 + pandas.DateOffset(months=cm)
for cm in resp.cmbirth]
resp['decade'] = (pandas.DatetimeIndex(dates).year - 1900) // 10
def ResampleDivorceCurve(resps):
"""Plots divorce curves based on resampled data.
resps: list of respondent DataFrames
"""
for _ in range(41):
samples = [thinkstats2.ResampleRowsWeighted(resp)
for resp in resps]
sample = pandas.concat(samples, ignore_index=True)
PlotDivorceCurveByDecade(sample, color='#225EA8', alpha=0.1)
thinkplot.Show(xlabel='years',
axis=[0, 28, 0, 1])
def ResampleDivorceCurveByDecade(resps):
"""Plots divorce curves for each birth cohort.
resps: list of respondent DataFrames
"""
for i in range(41):
samples = [thinkstats2.ResampleRowsWeighted(resp)
for resp in resps]
sample = pandas.concat(samples, ignore_index=True)
groups = sample.groupby('decade')
if i == 0:
survival.AddLabelsByDecade(groups, alpha=0.7)
EstimateSurvivalByDecade(groups, alpha=0.1)
thinkplot.Save(root='survival7',
xlabel='years',
axis=[0, 28, 0, 1])
def EstimateSurvivalByDecade(groups, **options):
"""Groups respondents by decade and plots survival curves.
groups: GroupBy object
"""
thinkplot.PrePlot(len(groups))
for name, group in groups:
print(name, len(group))
_, sf = EstimateSurvival(group)
thinkplot.Plot(sf, **options)
def EstimateSurvival(resp):
"""Estimates the survival curve.
resp: DataFrame of respondents
returns: pair of HazardFunction, SurvivalFunction
"""
complete = resp[resp.notdivorced == 0].duration
ongoing = resp[resp.notdivorced == 1].durationsofar
hf = survival.EstimateHazardFunction(complete, ongoing)
sf = hf.MakeSurvival()
return hf, sf
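# A minimal usage sketch for the estimators above (assumes the NSFG
# respondent files and the book's `survival` and `thinkplot` modules are
# available), mirroring what main() does for a single cohort:
#
#     resp = survival.ReadFemResp2002()
#     CleanData(resp)
#     married = resp[resp.evrmarry == 1]
#     hf, sf = EstimateSurvival(married)
#     thinkplot.Plot(sf)
#     thinkplot.Show(xlabel='years')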
def main():
resp6 = survival.ReadFemResp2002()
CleanData(resp6)
married6 = resp6[resp6.evrmarry==1]
resp7 = survival.ReadFemResp2010()
CleanData(resp7)
married7 = resp7[resp7.evrmarry==1]
ResampleDivorceCurveByDecade([married6, married7])
if __name__ == '__main__':
main()
| gpl-3.0 |
qbilius/streams | streams/envs/objectome.py | 1 | 2489 | import sys, os, hashlib, pickle, tempfile, zipfile, glob
from collections import OrderedDict
import numpy as np
import pandas
import tables
import boto3
import pymongo
import tqdm
import skimage, skimage.io, skimage.transform
from streams.envs.dataset import Dataset
from streams.utils import lazy_property
class Objectome(Dataset):
DATA = {'meta': 'streams/objectome/meta.pkl',
'images256': 'streams/objectome/imageset/ims24s100_256.npy',
'imageset/tfrecords': 'streams/objectome/imageset/images224.tfrecords',
}
OBJS = ['lo_poly_animal_RHINO_2',
'MB30758',
'calc01',
'interior_details_103_4',
'zebra',
'MB27346',
'build51',
'weimaraner',
'interior_details_130_2',
'lo_poly_animal_CHICKDEE',
'kitchen_equipment_knife2',
'lo_poly_animal_BEAR_BLK',
'MB30203',
'antique_furniture_item_18',
'lo_poly_animal_ELE_AS1',
'MB29874',
'womens_stockings_01M',
'Hanger_02',
'dromedary',
'MB28699',
'lo_poly_animal_TRANTULA',
'flarenut_spanner',
'womens_shorts_01M',
'22_acoustic_guitar']
def __init__(self):
self.name = 'objectome'
class Objectome24s10(Objectome):
DATA = {'meta': 'streams/objectome/meta.pkl',
'images224': 'streams/objectome/imageset/ims24s10_224.npy',
'sel240': 'streams/objectome/sel240.pkl',
'metrics240': 'streams/objectome/metrics240.pkl'}
OBJS = Objectome.OBJS
@lazy_property
def meta(self):
meta = super(Objectome24s10, self).meta
sel = pandas.read_pickle(self.datapath('sel240'))
return meta.loc[sel]
def human_data(self, kind='I2_dprime_C'):
"""
Kind:
- O1_hitrate, O1_accuracy, O1_dprime, O1_dprime_v2
- O2_hitrate, O2_accuracy, O2_dprime,
- I1_hitrate, I1_accuracy, I1_dprime, I1_dprime_C, I1_dprime_v2_C
- I2_hitrate, I2_accuracy, I2_dprime, I2_dprime_C, I1_dprime_v2
Rishi: "'v2' means averaging rather than pooling. So O1_dprime_v2 averages over all the distracter bins from O2, rather than pooling over all the trials."
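        A minimal usage sketch (assumes the pickled metric files listed in
        ``DATA`` are available to the ``streams`` dataset machinery):
            dset = Objectome24s10()
            dprimes = dset.human_data(kind='I2_dprime_C')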
"""
data = pandas.read_pickle(self.datapath('metrics240'))
# organized like: metric kind x 10 splits x 2 split halves
return data[kind] | gpl-3.0 |
I2Cvb/prostate | scratch/resampling_mrsi.py | 1 | 2747 | from __future__ import division
import numpy as np
import SimpleITK as sitk
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.collections import PolyCollection
from matplotlib.colors import colorConverter
import matplotlib.pyplot as plt
from protoclass.data_management import T2WModality
from protoclass.data_management import RDAModality
path_rda = '/data/prostate/experiments/Patient 996/MRSI/CSI_SE_3D_140ms_16c.rda'
path_t2w = '/data/prostate/experiments/Patient 996/T2W'
# Read the ground-truth
t2w_mod = T2WModality()
t2w_mod.read_data_from_path(path_t2w)
# Read the rda
rda_mod = RDAModality(1250.)
rda_mod.read_data_from_path(path_rda)
# Get the sitk image from the T2W
# We need to convert from numpy array to ITK
# Our convention was Y, X, Z
# We need to convert it in Z, Y, X which will be converted in X, Y, Z by ITK
t2w_img = sitk.GetImageFromArray(np.swapaxes(
np.swapaxes(t2w_mod.data_, 0, 1), 0, 2))
# Put all the spatial information
t2w_img.SetDirection(t2w_mod.metadata_['direction'])
t2w_img.SetOrigin(t2w_mod.metadata_['origin'])
t2w_img.SetSpacing(t2w_mod.metadata_['spacing'])
# Get the sitk image from the rda
rda_fake = np.random.randint(0, 255, size=(16, 16, 16))
rda_img = sitk.GetImageFromArray(rda_fake)
# Put all the spatial information
rda_img.SetDirection(rda_mod.metadata_['direction'])
rda_img.SetOrigin(rda_mod.metadata_['origin'])
rda_img.SetSpacing(rda_mod.metadata_['spacing'])
# Create a resampler object
transform = sitk.Transform()
transform.SetIdentity()
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(t2w_img)
resampler.SetInterpolator(sitk.sitkNearestNeighbor)
resampler.SetDefaultPixelValue(0)
resampler.SetTransform(transform)
resampler.SetOutputOrigin(rda_img.GetOrigin())
resampler.SetOutputDirection(rda_img.GetDirection())
res_img = resampler.Execute(t2w_img)
# Compute the distance of the X, Y, Z to make a croping of the ROI
size_x = int(rda_img.GetSize()[0] * (rda_img.GetSpacing()[0] /
t2w_img.GetSpacing()[0]))
size_y = int(rda_img.GetSize()[1] * (rda_img.GetSpacing()[1] /
t2w_img.GetSpacing()[1]))
size_z = int(rda_img.GetSize()[2] * (rda_img.GetSpacing()[2] /
t2w_img.GetSpacing()[2]))
out_np = sitk.GetArrayFromImage(res_img)
out_np = out_np[:size_z, :size_y, :size_x]
int_vol = np.zeros((rda_img.GetSize()))
for z in range(rda_img.GetSize()[0]):
for x in range(rda_img.GetSize()[1]):
for y in range(rda_img.GetSize()[2]):
int_vol[y, x, z] = np.sum(np.real(rda_mod.data_[:, y, x, z]))
# normalise so that the integrated intensities sum to one
int_vol /= np.sum(int_vol)
plt.figure()
plt.imshow(int_vol[:, :, 5])
plt.show()
| mit |
stylianos-kampakis/scikit-learn | sklearn/tests/test_cross_validation.py | 29 | 46740 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError('X cannot be d')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
# Check that the union of train an test split cover all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
# Check that a all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 2]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
# Check that despite the warning the folds are still computed even
# though all the classes are not necessarily represented at on each
# side of the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
assert_raises(ValueError, cval.StratifiedKFold, y, 0)
assert_raises(ValueError, cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
# Check that KFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
sorted_array = np.arange(100)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(101, 200)
assert_true(np.any(sorted_array != ind[train]))
sorted_array = np.arange(201, 300)
assert_true(np.any(sorted_array != ind[train]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
# for this data. We can highlight this fact be computing k-fold cross-
# validation with and without shuffling: we observe that the shuffling case
# wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffling
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit, indices are drawn with a
# equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
# Check that PredefinedSplit can reproduce a split generated by Kfold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1./3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
# test with 3d X and
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
cv_masks.append((train, test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
assert_true(isinstance(X_train_arr, np.ndarray))
assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# F1 score (class are balanced so f1_score should be equal to zero/one
# score
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(train).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X and
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
| bsd-3-clause |
krahman/BuildingMachineLearningSystemsWithPython | ch02/figure4_5.py | 1 | 1987 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
COLOUR_FIGURE = False
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from load import load_dataset
import numpy as np
from knn import learn_model, apply_model, accuracy
feature_names = [
'area',
'perimeter',
'compactness',
'length of kernel',
'width of kernel',
    'asymmetry coefficient',
'length of kernel groove',
]
def train_plot(features, labels):
y0, y1 = features[:, 2].min() * .9, features[:, 2].max() * 1.1
x0, x1 = features[:, 0].min() * .9, features[:, 0].max() * 1.1
X = np.linspace(x0, x1, 100)
Y = np.linspace(y0, y1, 100)
X, Y = np.meshgrid(X, Y)
model = learn_model(1, features[:, (0, 2)], np.array(labels))
C = apply_model(
np.vstack([X.ravel(), Y.ravel()]).T, model).reshape(X.shape)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .6, .6), (.6, 1., .6), (.6, .6, 1.)])
else:
cmap = ListedColormap([(1., 1., 1.), (.2, .2, .2), (.6, .6, .6)])
plt.xlim(x0, x1)
plt.ylim(y0, y1)
plt.xlabel(feature_names[0])
plt.ylabel(feature_names[2])
plt.pcolormesh(X, Y, C, cmap=cmap)
if COLOUR_FIGURE:
cmap = ListedColormap([(1., .0, .0), (.0, 1., .0), (.0, .0, 1.)])
plt.scatter(features[:, 0], features[:, 2], c=labels, cmap=cmap)
else:
for lab, ma in zip(range(3), "Do^"):
plt.plot(features[labels == lab, 0], features[
labels == lab, 2], ma, c=(1., 1., 1.))
features, labels = load_dataset('seeds')
names = sorted(set(labels))
labels = np.array([names.index(ell) for ell in labels])
train_plot(features, labels)
plt.savefig('figure4.png')
features -= features.mean(0)
features /= features.std(0)
train_plot(features, labels)
plt.savefig('figure5.png')
| mit |
NunoEdgarGub1/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel(r"$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
lfairchild/PmagPy | dialogs/ErMagicBuilder.py | 1 | 19590 | #!/usr/bin/env pythonw
# pylint: disable=W0612,C0111,C0103,W0201,C0301,E265
#============================================================================================
# LOG HEADER:
#============================================================================================
import os
import sys
import pandas as pd
import wx
import wx.grid
import wx.html
#import pdb
from . import pmag_widgets as pw
from pmagpy import find_pmag_dir
from pmagpy import builder2 as builder
from pmagpy import contribution_builder as cb
from pmagpy import data_model3 as data_model
#from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas \
#--------------------------------------------------------------
# MagIC model builder
#--------------------------------------------------------------
class MagIC_model_builder3(wx.Frame):
def __init__(self, WD, parent, contribution=None):
SIZE = wx.DisplaySize()
SIZE = (SIZE[0] * .95, SIZE[1] * .95)
wx.Frame.__init__(self, parent, wx.ID_ANY, size=SIZE,
name='ErMagicBuilder')
self.parent = parent
self.main_frame = self.Parent
self.panel = wx.ScrolledWindow(self)
self.panel.SetScrollbars(1, 1, 1, 1)
if sys.platform in ['win32', 'win64']:
self.panel.SetScrollbars(20, 20, 50, 50)
os.chdir(WD)
self.WD = os.getcwd()
self.site_lons = []
self.site_lats = []
# if ErMagic data object was not passed in,
# create one based on the working directory
if not contribution:
self.contribution = cb.Contribution(self.WD)
else:
self.contribution = contribution
# first propagate from measurements
self.contribution.propagate_measurement_info()
# then propagate from other tables
# (i.e., if sites are in specimens or samples but not measurements)
self.contribution.propagate_all_tables_info()
# then add in blank tables if any are missing
self.table_list = ["specimens", "samples", "sites", "locations", "ages"]
for table in self.table_list:
if table not in self.contribution.tables:
new_table = cb.MagicDataFrame(dtype=table,
dmodel=self.contribution.data_model)
self.contribution.tables[table] = new_table
self.SetTitle("Earth-Ref Magic Builder")
self.InitUI()
# hide mainframe, bind close event so that it closes the current window not the mainframe
self.parent.Hide()
self.parent.Bind(wx.EVT_MENU, lambda event: self.parent.menubar.on_quit(event, self), self.parent.menubar.file_quit)
def InitUI(self):
pnl1 = self.panel
box_sizers = []
self.text_controls = {}
self.info_options = {}
add_buttons = []
remove_buttons = []
if not self.contribution.data_model:
self.contribution.data_model = data_model.DataModel()
dmodel = self.contribution.data_model
for table in self.table_list:
N = self.table_list.index(table)
label = table
# make sure all tables have any actual headers (read from file)
# plus any required headers
reqd_headers = dmodel.get_reqd_headers(table)
if table in self.contribution.tables:
df_container = self.contribution.tables[table]
actual_headers = df_container.df.columns.union(reqd_headers)
else:
actual_headers = reqd_headers
# add any extra headers (i.e., reqd but not present), into the table
add_headers = actual_headers.difference(df_container.df.columns)
if table in ['sites', 'locations']:
if 'age' not in add_headers and 'age' not in actual_headers:
add_headers = add_headers.append(pd.Index(['age']))
for head in add_headers:
df_container.df[head] = None
# define actual (includes reqd) vs optional headers
actual_headers = df_container.df.columns
optional_headers = dmodel.dm[table].index.difference(actual_headers)
box_sizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY,
table), wx.VERTICAL)
box_sizers.append(box_sizer)
text_control = wx.TextCtrl(self.panel, id=-1, size=(210, 250),
style=wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL,
name=table)
self.text_controls[table] = text_control
info_option = wx.ListBox(choices=optional_headers, id=-1, name=table,
parent=self.panel, size=(200, 250), style=0)
self.info_options[table] = info_option
add_button = wx.Button(self.panel, id=-1, label='add', name=table)
add_buttons.append(add_button)
self.Bind(wx.EVT_BUTTON, self.on_add_button, add_button)
remove_button = wx.Button(self.panel, id=-1, label='remove', name=table)
self.Bind(wx.EVT_BUTTON, self.on_remove_button, remove_button)
#------
box_sizer.Add(wx.StaticText(pnl1, label='{} header list:'.format(table)),
wx.ALIGN_TOP)
box_sizer.Add(text_control, wx.ALIGN_TOP)
box_sizer.Add(wx.StaticText(pnl1, label='{} optional:'.format(table)),
flag=wx.ALIGN_TOP|wx.TOP, border=10)
box_sizer.Add(info_option, wx.ALIGN_TOP)
box_sizer.Add(add_button, wx.ALIGN_TOP)
box_sizer.Add(remove_button, wx.ALIGN_TOP)
# need headers
self.update_text_box(actual_headers, text_control)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.okButton = wx.Button(self.panel, wx.ID_OK, "&OK")
self.Bind(wx.EVT_BUTTON, self.on_okButton, self.okButton)
self.okButton.SetDefault()
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.Bind(wx.EVT_CLOSE, self.on_cancelButton)
self.helpButton = wx.Button(self.panel, wx.ID_ANY, '&Help')
self.Bind(wx.EVT_BUTTON, self.on_helpButton, self.helpButton)
hbox1.Add(self.okButton, flag=wx.ALL, border=5)
hbox1.Add(self.cancelButton, flag=wx.ALL, border=5)
hbox1.Add(self.helpButton, flag=wx.ALL, border=5)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.AddSpacer(5)
for sizer in box_sizers:
hbox.Add(sizer, flag=wx.ALIGN_LEFT|wx.BOTTOM, border=5)
hbox.AddSpacer(5)
hbox.AddSpacer(5)
text = wx.StaticText(self.panel, label="Step 0:\nChoose the headers for your specimens, samples, sites, locations and ages text files.\nOnce you have selected all necessary headers, click the OK button to move on to step 1.\nFor more information, click the help button below.")
vbox.Add(text, flag=wx.ALIGN_LEFT|wx.ALL, border=20)
#vbox.AddSpacer(20)
vbox.Add(hbox)
vbox.AddSpacer(20)
vbox.Add(hbox1, flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(20)
# if they are not already present
# add some strongly-recommended categories to age text_box
if 'ages' in self.contribution.tables:
actual_age_headers = list(self.contribution.tables['ages'].df.columns)
else:
actual_age_headers = dmodel.get_reqd_headers('ages')
for extra_header in ['age', 'age_unit', 'site']:
if extra_header not in actual_age_headers:
actual_age_headers.append(extra_header)
self.contribution.tables['ages'].df[extra_header] = None
add_age_headers = list(set(actual_age_headers))
self.update_text_box(add_age_headers, self.text_controls['ages'])
self.panel.SetSizer(vbox)
vbox.Fit(self)
self.Show()
self.Centre()
# these two lines ensure that everything shows up
wx.CallAfter(self.Refresh)
self.Update()
def update_text_box(self, headers_list, text_control):
text = ""
for key in sorted(headers_list):
text = text + key + "\n"
text = text[:-1]
text_control.SetValue('')
text_control.SetValue(text)
self.Refresh()
### Button methods ###
def on_add_button(self, event):
table = event.GetEventObject().Name
text_control = self.text_controls[table]
info_option = self.info_options[table]
headers = list(self.contribution.tables[table].df.columns)
selName = info_option.GetStringSelection()
if selName not in headers:
self.contribution.tables[table].df[selName] = None
headers.append(selName)
self.update_text_box(headers, text_control)
def on_remove_button(self, event):
table = event.GetEventObject().Name
info_option = self.info_options[table]
text_control = self.text_controls[table]
headers = list(self.contribution.tables[table].df.columns)
selName = str(info_option.GetStringSelection())
if selName in headers: # and selName not in reqd_header:
del self.contribution.tables[table].df[selName]
headers.remove(selName)
self.update_text_box(headers, text_control)
def on_okButton(self, event):
os.chdir(self.WD)
# update headers properly
for table in ['specimens', 'samples', 'sites', 'locations', 'ages']:
headers = self.text_controls[table].GetValue().split('\n')
for header in headers:
if header not in self.contribution.tables[table].df.columns:
#print "adding", header, "to", table
self.contribution.tables[table].df[header] = None
# take out unnecessary headers
self.main_frame.init_check_window()
self.Destroy()
def on_cancelButton(self, event):
self.Destroy()
self.main_frame.Show()
self.main_frame.Raise()
def on_helpButton(self, event):
#for use on the command line
path = find_pmag_dir.get_pmag_dir()
# for use with pyinstaller:
#path = self.Parent.resource_dir
help_page = os.path.join(path, 'dialogs', 'help_files', 'ErMagicHeadersHelp.html')
# if using with py2app, the directory structure is flat,
# so check to see where the resource actually is
if not os.path.exists(help_page):
help_page = os.path.join(path, 'help_files', 'ErMagicHeadersHelp.html')
html_frame = pw.HtmlFrame(self, page=help_page)
html_frame.Center()
html_frame.Show()
class MagIC_model_builder(wx.Frame):
""""""
#----------------------------------------------------------------------
def __init__(self, WD, parent, ErMagic_data=None):
SIZE = wx.DisplaySize()
SIZE = (SIZE[0] * .95, SIZE[1] * .95)
wx.Frame.__init__(self, parent, wx.ID_ANY, size=SIZE,
name='ErMagicBuilder')
#self.panel = wx.Panel(self)
self.main_frame = self.Parent
self.panel = wx.ScrolledWindow(self)
self.panel.SetScrollbars(1, 1, 1, 1)
if sys.platform in ['win32', 'win64']:
self.panel.SetScrollbars(20, 20, 50, 50)
os.chdir(WD)
self.WD = os.getcwd()
self.site_lons = []
self.site_lats = []
# if ErMagic data object was not passed in,
# create one based on the working directory
if not ErMagic_data:
self.er_magic = builder.ErMagicBuilder(self.WD)
else:
self.er_magic= ErMagic_data
print('-I- Read in any available data from working directory')
self.er_magic.get_all_magic_info()
print('-I- Initializing headers')
self.er_magic.init_default_headers()
self.er_magic.init_actual_headers()
self.SetTitle("Earth-Ref Magic Builder" )
self.InitUI()
def InitUI(self):
pnl1 = self.panel
table_list = ["specimen", "sample", "site", "location", "age"]
box_sizers = []
self.text_controls = {}
self.info_options = {}
add_buttons = []
remove_buttons = []
for table in table_list:
N = table_list.index(table)
label = table
optional_headers = self.er_magic.headers[label]['er'][2]
actual_headers = self.er_magic.headers[label]['er'][0]
box_sizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, wx.ID_ANY,
table), wx.VERTICAL)
box_sizers.append(box_sizer)
text_control = wx.TextCtrl(self.panel, id=-1, size=(210, 250),
style=wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL,
name=table)
self.text_controls[table] = text_control
info_option = wx.ListBox(choices=optional_headers, id=-1, name=table,
parent=self.panel, size=(200, 250), style=0)
self.info_options[table] = info_option
add_button = wx.Button(self.panel, id=-1, label='add', name=table)
add_buttons.append(add_button)
self.Bind(wx.EVT_BUTTON, self.on_add_button, add_button)
remove_button = wx.Button(self.panel, id=-1, label='remove', name=table)
self.Bind(wx.EVT_BUTTON, self.on_remove_button, remove_button)
#------
box_sizer.Add(wx.StaticText(pnl1, label='{} header list:'.format(table)),
wx.ALIGN_TOP)
box_sizer.Add(text_control, wx.ALIGN_TOP)
box_sizer.Add(wx.StaticText(pnl1, label='{} optional:'.format(table)),
flag=wx.ALIGN_TOP|wx.TOP, border=10)
box_sizer.Add(info_option, wx.ALIGN_TOP)
box_sizer.Add(add_button, wx.ALIGN_TOP)
box_sizer.Add(remove_button, wx.ALIGN_TOP)
# need headers
self.update_text_box(actual_headers, text_control)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.okButton = wx.Button(self.panel, wx.ID_OK, "&OK")
self.Bind(wx.EVT_BUTTON, self.on_okButton, self.okButton)
self.okButton.SetDefault()
self.cancelButton = wx.Button(self.panel, wx.ID_CANCEL, '&Cancel')
self.Bind(wx.EVT_BUTTON, self.on_cancelButton, self.cancelButton)
self.helpButton = wx.Button(self.panel, wx.ID_ANY, '&Help')
self.Bind(wx.EVT_BUTTON, self.on_helpButton, self.helpButton)
hbox1.Add(self.okButton, flag=wx.ALL, border=5)
hbox1.Add(self.cancelButton, flag=wx.ALL, border=5)
hbox1.Add(self.helpButton, flag=wx.ALL, border=5)
#------
vbox=wx.BoxSizer(wx.VERTICAL)
hbox = wx.BoxSizer(wx.HORIZONTAL)
hbox.AddSpacer(5)
for sizer in box_sizers:
hbox.Add(sizer, flag=wx.ALIGN_LEFT|wx.BOTTOM, border=5)
hbox.AddSpacer(5)
hbox.AddSpacer(5)
text = wx.StaticText(self.panel, label="Step 0:\nChoose the headers for your specimens, samples, sites, locations and ages tables.\nOnce you have selected all necessary headers, click the OK button to move on to step 1.\nFor more information, click the help button below.")
vbox.Add(text, flag=wx.ALIGN_LEFT|wx.ALL, border=20)
#vbox.AddSpacer(20)
vbox.Add(hbox)
vbox.AddSpacer(20)
vbox.Add(hbox1, flag=wx.ALIGN_CENTER_HORIZONTAL)
vbox.AddSpacer(20)
# if they are not already present
# add some strongly-recommended categories to age text_box
actual_age_headers = self.er_magic.headers['age']['er'][0]
for extra_header in ['age', 'age_unit']:
if extra_header not in actual_age_headers:
actual_age_headers.append(extra_header)
add_age_headers = list(set(actual_age_headers))
self.update_text_box(add_age_headers, self.text_controls['age'])
self.panel.SetSizer(vbox)
vbox.Fit(self)
self.Show()
self.Centre()
# these two lines ensure that everything shows up
wx.CallAfter(self.Refresh)
self.Update()
def update_text_box(self, headers_list, text_control):
text = ""
#command="keys=self.%s_header"%table
#exec command
for key in sorted(headers_list):
text = text + key + "\n"
text = text[:-1]
text_control.SetValue('')
text_control.SetValue(text)
self.Refresh()
### Button methods ###
def on_add_button(self, event):
table = event.GetEventObject().Name
text_control = self.text_controls[table]
info_option = self.info_options[table]
header = self.er_magic.headers[table]['er'][0]
selName = info_option.GetStringSelection()
if selName not in header:
header.append(selName)
self.update_text_box(header, text_control)
def on_remove_button(self, event):
table = event.GetEventObject().Name
info_option = self.info_options[table]
text_control = self.text_controls[table]
header = self.er_magic.headers[table]['er'][0]
reqd_header = self.er_magic.headers[table]['er'][1]
selName = str(info_option.GetStringSelection())
if selName in header and selName not in reqd_header:
header.remove(selName)
self.update_text_box(header, text_control)
def on_okButton(self, event):
os.chdir(self.WD)
# update headers properly
for table in ['specimen', 'sample', 'site', 'location', 'age']:
headers = self.text_controls[table].GetValue().split('\n')
for header in headers:
if header not in self.er_magic.headers[table]['er'][0]:
self.er_magic.headers[table]['er'][0].append(header)
# take out 'er_specimen_name' and other unnecessary headers
self.er_magic.headers[table]['er'][0] = builder.remove_list_headers(self.er_magic.headers[table]['er'][0])
self.main_frame.init_check_window2()
self.Destroy()
def on_cancelButton(self, event):
self.Destroy()
def on_helpButton(self, event):
#for use on the command line
path = find_pmag_dir.get_pmag_dir()
# for use with pyinstaller:
#path = self.Parent.resource_dir
help_page = os.path.join(path, 'dialogs', 'help_files', 'ErMagicBuilderHelp.html')
# if using with py2app, the directory structure is flat,
# so check to see where the resource actually is
if not os.path.exists(help_page):
help_page = os.path.join(path, 'help_files', 'ErMagicBuilderHelp.html')
html_frame = pw.HtmlFrame(self, page=help_page)
html_frame.Center()
html_frame.Show()
class HtmlWindow(wx.html.HtmlWindow):
def OnLinkClicked(self, link):
wx.LaunchDefaultBrowser(link.GetHref())
class MyHtmlPanel(wx.Frame):
def __init__(self, parent,HTML):
wx.Frame.__init__(self, parent, wx.ID_ANY, title="Help Window", size=(800,600))
html = HtmlWindow(self)
html.LoadPage(HTML)
#self.Show()
| bsd-3-clause |
vigilv/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
avisochek/scastrap_data_pipeline | clustering_sandbox/clustering_algorithms.py | 1 | 4567 | #!Clustering Algorithms
## Design clustering algorithms here that
## will be tested out on the data.
## Each clustering algorithm should be specified
## as a function that takes as arguments
## a list of longitude coordinates, a list of
## latitude coordinates and the city info, and returns
## a list of "clusters", each cluster being a list
## of indices of the coordinates involved...
## imports:
import math
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import AffinityPropagation
from sklearn.cluster import DBSCAN
import numpy as np
import os
## used to find distances between points:
import geopy
from geopy import distance
import csv
## function to translate between
## cluster labels and nested indices
def labels_to_index(cluster_labels):
cluster_indices=[]
for label in list(set(cluster_labels)):
cluster_index = (cluster_labels==label).nonzero()[0].tolist()
cluster_indices.append(cluster_index)
return cluster_indices
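## A minimal sketch of the interface described in the header comments
## above (hypothetical, not one of the algorithms used elsewhere in the
## pipeline): every clustering function here takes the longitude list,
## the latitude list, the city dict and a cluster_diameter, and returns
## a list of clusters, each cluster being a list of indices into
## lngs/lats. For instance, labels_to_index(np.array([0, 1, 0, 2]))
## returns [[0, 2], [1], [3]].
def single_cluster_example(lngs, lats, city, cluster_diameter):
    ## trivially place every point in one cluster, just to illustrate
    ## the expected return shape
    if len(lngs) == 0:
        return []
    return [list(range(len(lngs)))]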
## here's k-means clustering as an example of
## how to construct the clustering algorithm
def k_means_clustering(lngs,lats,city, cluster_diameter):
city_lat=city["lat"];
city_lng=city["lng"]
    ## scale the longitudinal axis to approximate
    ## cartesian coordinates (city_lat is assumed to be in degrees,
    ## so convert to radians before taking the cosine)...
    lngs = np.array(lngs)*math.cos(math.radians(city_lat))
    ## use roughly one cluster per 10 points to determine k;
    ## not the most objective method, but it's a start
n_clusters = int(len(lngs)/10.)
if n_clusters>0:
kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(np.array([lngs,lats]).transpose())
cluster_labels = np.array(kmeans.labels_)
## use labels_to_index function to get
## output from cluster labels...
return labels_to_index(cluster_labels)
else:
return []
def get_distance(coord1,coord2):
    ## approximate distance in feet between two [lng, lat] points
    ## (in degrees) using an equirectangular projection:
    ## R is the Earth's radius in feet (3961 miles * 5280 ft/mile),
    ## the longitude difference is scaled by cos(latitude),
    ## and Pythagoras gives the angular separation.
    R=3961.*5280
    dlon = (math.pi/180.)*(coord1[0]-coord2[0])*math.cos(coord1[1]*math.pi/180)
    dlat = (math.pi/180.)*(coord1[1]-coord2[1])
    a=dlat**2.+dlon**2.
    c=math.sqrt(a)
    d=R*c
    return d
def mcl(lngs,lats,city, cluster_diameter):
lat_to_feet_multiplier=288200.
# lng_multiplier=math.cos(city["lat"])
# city_lng=city["lng"]
# city_lat=city["lat"]
## generate graph
graph=[]
used_inds=[]
for i in range(len(lngs)):
for j in range(i+1,len(lngs)):
distance=get_distance([lngs[i],lats[i]],[lngs[j],lats[j]])
# distance=geopy.distance.vincenty(
# tuple([lngs[i],lats[i]]),
# tuple([lngs[j],lats[j]])).feet
if distance<cluster_diameter:
graph.append([i,j,1-distance/(1.5*(cluster_diameter))])
## write graph to mcl input file
with open("mcl_data/mcl_input_data.tsv","w") as f:
for row in graph:
f.write(str(row[0])+"\t"+str(row[1])+"\t"+str(row[2])+"\n")
## run mcl using command line
os.system("mcl mcl_data/mcl_input_data.tsv -I 2 --abc -o mcl_data/mcl_output_data.tsv")
output_data=[]
## read in output file from previous step
with open("mcl_data/mcl_output_data.tsv","r") as f:
tsvin = csv.reader(f,delimiter='\t')
for row in tsvin:
int_row=[]
for ind in row:
int_row.append(int(ind))
output_data.append(int_row)
return output_data
def agglom(lngs, lats, city, cluster_diameter):
city_area = city["area"]
city_lng=city["lng"]
city_lat=city["lat"]
    lngs = np.array(lngs)*math.cos(math.radians(city_lat))  ## city_lat assumed to be in degrees
n_clusters=int(city_area/(cluster_diameter**2))
agglomerative = AgglomerativeClustering(n_clusters = n_clusters)
agglomerative.fit(np.array([lngs, lats]).transpose())
cluster_labels = np.array(agglomerative.labels_)
return labels_to_index(cluster_labels)
def affinityprop(lngs, lats, city, cluster_diameter):
city_area = city["area"]
city_lng = city["lng"]
city_lat = city["lat"]
lngs = np.array(lngs)#*(math.cos(city["lat"])**2)
affinity = AffinityPropagation(damping=0.75, max_iter=200, convergence_iter=15, copy=True, preference=None, affinity='euclidean', verbose=False)
affinity.fit(np.array([lngs, lats]).transpose())
cluster_labels = np.array(affinity.labels_)
return labels_to_index(cluster_labels)
def db(lngs, lats, city, cluster_diameter):
city_area = city["area"]
city_lng = city["lng"]
city_lat = city["lat"]
    lngs = np.array(lngs)*math.cos(math.radians(city_lat))  ## city_lat assumed to be in degrees
dbscan = DBSCAN(metric='euclidean')
dbscan.fit(np.array([lngs, lats]).transpose())
cluster_labels = np.array(dbscan.labels_)
return labels_to_index(cluster_labels)
| gpl-3.0 |
q1ang/seaborn | doc/sphinxext/ipython_directive.py | 37 | 37557 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
    The string to represent the IPython output prompt in the generated ReST.
    The default is 'Out[%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
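As a purely illustrative conf.py snippet, the values below simply restate
the documented defaults and are not required::
    ipython_promptin = 'In [%d]:'
    ipython_promptout = 'Out[%d]:'
    ipython_mplbackend = 'agg'
    ipython_execlines = ['import numpy as np',
                         'import matplotlib.pyplot as plt']
    ipython_holdcount = True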
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
from __future__ import print_function
from __future__ import unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
from pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
text_type = str
else:
from StringIO import StringIO
text_type = unicode
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
    input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
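    For example (illustrative only, assuming the default In/Out prompt
    regexes), a part like::
      # compute
      In [3]: 1 + 1
      Out[3]: 2
    would be tokenized roughly as::
      [(COMMENT, '# compute'),
       (INPUT, (None, '1 + 1', '')),
       (OUTPUT, '2')]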
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class DecodingStringIO(StringIO, object):
def __init__(self,buf='',encodings=('utf8',), *args, **kwds):
super(DecodingStringIO, self).__init__(buf, *args, **kwds)
self.set_encodings(encodings)
def set_encodings(self, encodings):
self.encodings = encodings
def write(self,data):
if isinstance(data, text_type):
return super(DecodingStringIO, self).write(data)
else:
for enc in self.encodings:
try:
data = data.decode(enc)
return super(DecodingStringIO, self).write(data)
except :
pass
# default to brute utf8 if no encoding succeded
return super(DecodingStringIO, self).write(data.decode('utf8', 'replace'))
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None,state=None):
self.cout = DecodingStringIO(u'')
if exec_lines is None:
exec_lines = []
self.state = state
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbededSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
try:
source_raw = splitter.source_raw_reset()[1]
except:
# recent ipython #4504
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
# needed. this attrib is set by IpythonDirective.run()
        # based on the specified block options, defaulting to ['utf8']
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
TAB = ' ' * 4
if is_doctest and output is not None:
found = output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, decorator,
image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
decorator, image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings. it is unedited directive content
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag,
'output_encoding': directives.unchanged_required
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines, self.state)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if not self.state.document.current_source in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
self.shell.output_encoding = [options.get('output_encoding', 'utf8')]
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
   .....: &d=9&e=22&f=2009&g=d&a=1&b=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| bsd-3-clause |
cmmorrow/sci-analysis | sci_analysis/data/data_operations.py | 1 | 7101 | """sci_analysis module: data_operations
Functions:
to_float - tries to convert a variable to a float.
flatten - recursively reduces the number of dimensions to 1.
drop_nan - removes values that are not a number from a Vector.
drop_nan_intersect - returns only numeric values from two Vectors.
is_vector - checks if a given sequence is a sci_analysis Vector object.
is_data - checks if a given sequence is a sci_analysis Data object.
is_tuple - checks if a given sequence is a tuple.
is_iterable - checks if a given variable is iterable, but not a string.
is_array - checks if a given sequence is a numpy Array object.
is_dict - checks if a given sequence is a dictionary.
is_group - checks if a given variable is a list of iterable objects.
    is_dict_group - checks if a given variable is a dictionary of iterable objects.
    is_series - checks if a given sequence is a pandas Series object.
    is_number - checks if a given variable can be converted to a number.
"""
# from __future__ import absolute_import
import six
import numpy as np
import pandas as pd
def to_float(seq):
"""
    Takes an argument seq and tries to convert each value to a float, returning the result. If a value cannot be
    converted to a float, it is replaced by 'nan'.
Parameters
----------
seq : array-like
The input object.
Returns
-------
subseq : array_like
seq with values converted to a float or "nan".
>>> to_float(['1', '2', '3', 'four', '5'])
[1.0, 2.0, 3.0, nan, 5.0]
"""
float_list = list()
for i in range(len(seq)):
try:
float_list.append(float(seq[i]))
except ValueError:
float_list.append(float("nan"))
except TypeError:
float_list.append(to_float(seq[i]))
return float_list
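# The TypeError branch above handles nested sequences by recursing into each
# element, e.g. (illustrative values):
#
#     to_float([['1', 'x'], ['2.5']])  ->  [[1.0, nan], [2.5]]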
def flatten(seq):
"""
Recursively reduces the dimension of seq to one.
Parameters
----------
seq : array-like
The input object.
Returns
-------
subseq : array_like
A flattened copy of the input object.
Flatten a two-dimensional list into a one-dimensional list
>>> flatten([[1, 2, 3], [4, 5, 6]])
array([1, 2, 3, 4, 5, 6])
Flatten a three-dimensional list into a one-dimensional list
>>> flatten([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
array([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
>>> flatten(([1, 2, 3], [4, 5, 6]))
array([1, 2, 3, 4, 5, 6])
>>> flatten(list(zip([1, 2, 3], [4, 5, 6])))
array([1, 4, 2, 5, 3, 6])
>>> flatten([(1, 2), (3, 4), (5, 6), (7, 8)])
array([1, 2, 3, 4, 5, 6, 7, 8])
"""
return np.array(seq).flatten()
def is_tuple(obj):
"""
Checks if a given sequence is a tuple.
Parameters
----------
obj : object
        The input argument.
Returns
-------
test result : bool
        The test result of whether obj is a tuple or not.
>>> is_tuple(('a', 'b'))
True
>>> is_tuple(['a', 'b'])
False
>>> is_tuple(4)
False
"""
    return isinstance(obj, tuple)
def is_iterable(obj):
"""
Checks if a given variable is iterable, but not a string.
Parameters
----------
obj : Any
The input argument.
Returns
-------
test result : bool
The test result of whether variable is iterable or not.
>>> is_iterable([1, 2, 3])
True
>>> is_iterable((1, 2, 3))
True
>>> is_iterable({'one': 1, 'two': 2, 'three': 3})
True
    String arguments return False.
>>> is_iterable('foobar')
False
Scalars return False.
>>> is_iterable(42)
False
"""
if isinstance(obj, six.string_types):
return False
try:
obj.__iter__()
return True
except (AttributeError, TypeError):
return False
def is_array(obj):
"""
Checks if a given sequence is a numpy Array object.
Parameters
----------
obj : object
The input argument.
Returns
-------
test result : bool
        The test result of whether obj is a numpy Array or not.
>>> import numpy as np
>>> is_array([1, 2, 3, 4, 5])
False
>>> is_array(np.array([1, 2, 3, 4, 5]))
True
"""
return hasattr(obj, 'dtype')
def is_series(obj):
"""
Checks if a given sequence is a Pandas Series object.
Parameters
----------
obj : object
The input argument.
Returns
-------
bool
>>> is_series([1, 2, 3])
False
>>> is_series(pd.Series([1, 2, 3]))
True
"""
return isinstance(obj, pd.Series)
def is_dict(obj):
"""
Checks if a given sequence is a dictionary.
Parameters
----------
obj : object
The input argument.
Returns
-------
test result : bool
        The test result of whether obj is a dictionary or not.
>>> is_dict([1, 2, 3])
False
>>> is_dict((1, 2, 3))
False
>>> is_dict({'one': 1, 'two': 2, 'three': 3})
True
>>> is_dict('foobar')
False
"""
return isinstance(obj, dict)
def is_group(seq):
"""
Checks if a given variable is a list of iterable objects.
Parameters
----------
seq : array_like
The input argument.
Returns
-------
test result : bool
The test result of whether seq is a list of array_like values or not.
>>> is_group([[1, 2, 3], [4, 5, 6]])
True
>>> is_group({'one': 1, 'two': 2, 'three': 3})
False
>>> is_group(([1, 2, 3], [4, 5, 6]))
True
>>> is_group([1, 2, 3, 4, 5, 6])
False
>>> is_group({'foo': [1, 2, 3], 'bar': [4, 5, 6]})
False
"""
try:
if any(is_iterable(x) for x in seq):
return True
else:
return False
except TypeError:
return False
def is_dict_group(seq):
"""
Checks if a given variable is a dictionary of iterable objects.
Parameters
----------
seq : array-like
The input argument.
Returns
-------
test result : bool
The test result of whether seq is a dictionary of array_like values or not.
>>> is_dict_group([[1, 2, 3], [4, 5, 6]])
False
>>> is_dict_group(([1, 2, 3], [4, 5, 6]))
False
>>> is_dict_group([1, 2, 3, 4, 5, 6])
False
>>> is_dict_group({'foo': [1, 2, 3], 'bar': [4, 5, 6]})
True
"""
try:
if is_group(list(seq.values())):
return True
else:
return False
except (AttributeError, TypeError):
return False
def is_number(obj):
"""
Checks if the given object is a number.
Parameters
----------
obj : Any
The input argument.
Returns
-------
test result : bool
The test result of whether obj can be converted to a number or not.
>>> is_number(3)
True
>>> is_number(1.34)
True
>>> is_number('3')
True
>>> is_number(np.array(3))
True
>>> is_number('a')
False
>>> is_number([1, 2, 3])
False
>>> is_number(None)
False
"""
try:
float(obj)
return True
except (ValueError, TypeError):
return False
| mit |
chen0031/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/__init__.py | 69 | 28184 | """
This is an object-oriented plotting library.
A procedural interface is provided by the companion pylab module,
which may be imported directly, e.g::
from pylab import *
or using ipython::
ipython -pylab
For the most part, direct use of the object-oriented library is
encouraged when programming rather than working interactively. The
exceptions are the pylab commands :func:`~matplotlib.pyplot.figure`,
:func:`~matplotlib.pyplot.subplot`,
:func:`~matplotlib.backends.backend_qt4agg.show`, and
:func:`~pyplot.savefig`, which can greatly simplify scripting.
Modules include:
:mod:`matplotlib.axes`
defines the :class:`~matplotlib.axes.Axes` class. Most pylab
commands are wrappers for :class:`~matplotlib.axes.Axes`
methods. The axes module is the highest level of OO access to
the library.
:mod:`matplotlib.figure`
defines the :class:`~matplotlib.figure.Figure` class.
:mod:`matplotlib.artist`
defines the :class:`~matplotlib.artist.Artist` base class for
all classes that draw things.
:mod:`matplotlib.lines`
defines the :class:`~matplotlib.lines.Line2D` class for
drawing lines and markers
    :mod:`matplotlib.patches`
defines classes for drawing polygons
:mod:`matplotlib.text`
defines the :class:`~matplotlib.text.Text`,
:class:`~matplotlib.text.TextWithDash`, and
:class:`~matplotlib.text.Annotate` classes
:mod:`matplotlib.image`
defines the :class:`~matplotlib.image.AxesImage` and
:class:`~matplotlib.image.FigureImage` classes
:mod:`matplotlib.collections`
classes for efficient drawing of groups of lines or polygons
:mod:`matplotlib.colors`
classes for interpreting color specifications and for making
colormaps
:mod:`matplotlib.cm`
colormaps and the :class:`~matplotlib.image.ScalarMappable`
mixin class for providing color mapping functionality to other
classes
:mod:`matplotlib.ticker`
classes for calculating tick mark locations and for formatting
tick labels
:mod:`matplotlib.backends`
a subpackage with modules for various gui libraries and output
formats
The base matplotlib namespace includes:
:data:`~matplotlib.rcParams`
a global dictionary of default configuration settings. It is
        initialized by code which may be overridden by a matplotlibrc
file.
:func:`~matplotlib.rc`
a function for setting groups of rcParams values
:func:`~matplotlib.use`
a function for setting the matplotlib backend. If used, this
function must be called immediately after importing matplotlib
for the first time. In particular, it must be called
**before** importing pylab (if pylab is imported).
matplotlib is written by John D. Hunter (jdh2358 at gmail.com) and a
host of others.
"""
from __future__ import generators
__version__ = '0.98.5.2'
__revision__ = '$Revision: 6660 $'
__date__ = '$Date: 2008-12-18 06:10:51 -0600 (Thu, 18 Dec 2008) $'
import os, re, shutil, subprocess, sys, warnings
import distutils.sysconfig
import distutils.version
NEWCONFIG = False
# Needed for toolkit setuptools support
if 0:
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
pass # must not have setuptools
if not hasattr(sys, 'argv'): # for modpython
sys.argv = ['modpython']
"""
Manage user customizations through a rc file.
The default file location is given in the following order
- environment variable MATPLOTLIBRC
- HOME/.matplotlib/matplotlibrc if HOME is defined
- PATH/matplotlibrc where PATH is the return value of
get_data_path()
"""
import sys, os, tempfile
from rcsetup import defaultParams, validate_backend, validate_toolbar
from rcsetup import validate_cairo_format
major, minor1, minor2, s, tmp = sys.version_info
_python24 = major>=2 and minor1>=4
# the havedate check was a legacy from old matplotlib which preceded
# datetime support
_havedate = True
#try:
# import pkg_resources # pkg_resources is part of setuptools
#except ImportError: _have_pkg_resources = False
#else: _have_pkg_resources = True
if not _python24:
raise ImportError('matplotlib requires Python 2.4 or later')
import numpy
nn = numpy.__version__.split('.')
if not (int(nn[0]) >= 1 and int(nn[1]) >= 1):
raise ImportError(
'numpy 1.1 or later is required; you have %s' % numpy.__version__)
def is_string_like(obj):
if hasattr(obj, 'shape'): return 0
try: obj + ''
except (TypeError, ValueError): return 0
return 1
def _is_writable_dir(p):
"""
    p is a string pointing to a putative writable dir -- return True if p
    is such a string, else False
"""
try: p + '' # test is string like
except TypeError: return False
try:
t = tempfile.TemporaryFile(dir=p)
t.write('1')
t.close()
except OSError: return False
else: return True
class Verbose:
"""
A class to handle reporting. Set the fileo attribute to any file
instance to handle the output. Default is sys.stdout
"""
levels = ('silent', 'helpful', 'debug', 'debug-annoying')
vald = dict( [(level, i) for i,level in enumerate(levels)])
# parse the verbosity from the command line; flags look like
# --verbose-silent or --verbose-helpful
_commandLineVerbose = None
for arg in sys.argv[1:]:
if not arg.startswith('--verbose-'): continue
_commandLineVerbose = arg[10:]
def __init__(self):
self.set_level('silent')
self.fileo = sys.stdout
def set_level(self, level):
'set the verbosity to one of the Verbose.levels strings'
if self._commandLineVerbose is not None:
level = self._commandLineVerbose
if level not in self.levels:
raise ValueError('Illegal verbose string "%s". Legal values are %s'%(level, self.levels))
self.level = level
def set_fileo(self, fname):
std = {
'sys.stdout': sys.stdout,
'sys.stderr': sys.stderr,
}
if fname in std:
self.fileo = std[fname]
else:
try:
fileo = file(fname, 'w')
except IOError:
raise ValueError('Verbose object could not open log file "%s" for writing.\nCheck your matplotlibrc verbose.fileo setting'%fname)
else:
self.fileo = fileo
def report(self, s, level='helpful'):
"""
print message s to self.fileo if self.level>=level. Return
value indicates whether a message was issued
"""
if self.ge(level):
print >>self.fileo, s
return True
return False
def wrap(self, fmt, func, level='helpful', always=True):
"""
        return a callable function that wraps func and reports its
        output through the verbose handler if the current verbosity
        level is at least the given level
if always is True, the report will occur on every function
call; otherwise only on the first time the function is called
"""
assert callable(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
if (always or not wrapper._spoke):
spoke = self.report(fmt%ret, level)
if not wrapper._spoke: wrapper._spoke = spoke
return ret
wrapper._spoke = False
wrapper.__doc__ = func.__doc__
return wrapper
def ge(self, level):
'return true if self.level is >= level'
return self.vald[self.level]>=self.vald[level]
verbose=Verbose()
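# A minimal sketch of how the Verbose instance above is used further down in
# this module (messages are illustrative):
#
#     verbose.set_level('helpful')
#     verbose.report('loaded rc file', 'helpful')   # printed to verbose.fileo
#     verbose.report('internal detail', 'debug')    # suppressed at 'helpful'
#     get_home = verbose.wrap('$HOME=%s', _get_home, always=False)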
def checkdep_dvipng():
try:
s = subprocess.Popen(['dvipng','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[1]
v = line.split()[-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_ghostscript():
try:
if sys.platform == 'win32':
command_args = ['gswin32c', '--version']
else:
command_args = ['gs', '--version']
s = subprocess.Popen(command_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
v = s.stdout.read()[:-1]
return v
except (IndexError, ValueError, OSError):
return None
def checkdep_tex():
try:
s = subprocess.Popen(['tex','-version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
line = s.stdout.readlines()[0]
        pattern = r'3\.1\d+'
match = re.search(pattern, line)
v = match.group(0)
return v
except (IndexError, ValueError, AttributeError, OSError):
return None
def checkdep_pdftops():
try:
s = subprocess.Popen(['pdftops','-v'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
for line in s.stderr:
if 'version' in line:
v = line.split()[-1]
return v
except (IndexError, ValueError, UnboundLocalError, OSError):
return None
def compare_versions(a, b):
"return True if a is greater than or equal to b"
if a:
a = distutils.version.LooseVersion(a)
b = distutils.version.LooseVersion(b)
if a>=b: return True
else: return False
else: return False
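# For example, compare_versions('8.11', '7.07') -> True,
# compare_versions('7.0', '7.07') -> False, and compare_versions(None, '7.07')
# -> False (a missing version never satisfies a requirement).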
def checkdep_ps_distiller(s):
if not s:
return False
flag = True
gs_req = '7.07'
gs_sugg = '7.07'
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later '
'is recommended to use the ps.usedistiller option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller option can not be used '
'unless ghostscript-%s or later is installed on your system') % gs_req)
if s == 'xpdf':
pdftops_req = '3.0'
pdftops_req_alt = '0.9' # poppler version numbers, ugh
pdftops_v = checkdep_pdftops()
if compare_versions(pdftops_v, pdftops_req):
pass
elif compare_versions(pdftops_v, pdftops_req_alt) and not \
compare_versions(pdftops_v, '1.0'):
pass
else:
flag = False
warnings.warn(('matplotlibrc ps.usedistiller can not be set to '
'xpdf unless xpdf-%s or later is installed on your system') % pdftops_req)
if flag:
return s
else:
return False
def checkdep_usetex(s):
if not s:
return False
tex_req = '3.1415'
gs_req = '7.07'
gs_sugg = '7.07'
dvipng_req = '1.5'
flag = True
tex_v = checkdep_tex()
if compare_versions(tex_v, tex_req): pass
else:
flag = False
warnings.warn(('matplotlibrc text.usetex option can not be used '
'unless TeX-%s or later is '
'installed on your system') % tex_req)
dvipng_v = checkdep_dvipng()
if compare_versions(dvipng_v, dvipng_req): pass
else:
flag = False
warnings.warn( 'matplotlibrc text.usetex can not be used with *Agg '
'backend unless dvipng-1.5 or later is '
'installed on your system')
gs_v = checkdep_ghostscript()
if compare_versions(gs_v, gs_sugg): pass
elif compare_versions(gs_v, gs_req):
verbose.report(('ghostscript-%s found. ghostscript-%s or later is '
'recommended for use with the text.usetex '
'option.') % (gs_v, gs_sugg))
else:
flag = False
warnings.warn(('matplotlibrc text.usetex can not be used '
'unless ghostscript-%s or later is '
'installed on your system') % gs_req)
return flag
def _get_home():
"""Find user's home directory if possible.
Otherwise raise error.
:see: http://mail.python.org/pipermail/python-list/2005-February/263921.html
"""
path=''
try:
path=os.path.expanduser("~")
except:
pass
if not os.path.isdir(path):
for evar in ('HOME', 'USERPROFILE', 'TMP'):
try:
path = os.environ[evar]
if os.path.isdir(path):
break
except: pass
if path:
return path
else:
raise RuntimeError('please define environment variable $HOME')
get_home = verbose.wrap('$HOME=%s', _get_home, always=False)
def _get_configdir():
"""
Return the string representing the configuration dir.
default is HOME/.matplotlib. you can override this with the
MPLCONFIGDIR environment variable
"""
configdir = os.environ.get('MPLCONFIGDIR')
if configdir is not None:
if not _is_writable_dir(configdir):
raise RuntimeError('Could not write to MPLCONFIGDIR="%s"'%configdir)
return configdir
h = get_home()
p = os.path.join(get_home(), '.matplotlib')
if os.path.exists(p):
if not _is_writable_dir(p):
raise RuntimeError("'%s' is not a writable dir; you must set %s/.matplotlib to be a writable dir. You can also set environment variable MPLCONFIGDIR to any writable directory where you want matplotlib data stored "% (h, h))
else:
if not _is_writable_dir(h):
raise RuntimeError("Failed to create %s/.matplotlib; consider setting MPLCONFIGDIR to a writable directory for matplotlib configuration data"%h)
os.mkdir(p)
return p
get_configdir = verbose.wrap('CONFIGDIR=%s', _get_configdir, always=False)
def _get_data_path():
'get the path to matplotlib data'
if 'MATPLOTLIBDATA' in os.environ:
path = os.environ['MATPLOTLIBDATA']
if not os.path.isdir(path):
raise RuntimeError('Path in environment MATPLOTLIBDATA not a directory')
return path
path = os.sep.join([os.path.dirname(__file__), 'mpl-data'])
if os.path.isdir(path): return path
    # setuptools' namespace_packages may hijack this init file
    # so we need to try something known to be in matplotlib, not basemap
import matplotlib.afm
path = os.sep.join([os.path.dirname(matplotlib.afm.__file__), 'mpl-data'])
if os.path.isdir(path): return path
# py2exe zips pure python, so still need special check
if getattr(sys,'frozen',None):
path = os.path.join(os.path.split(sys.path[0])[0], 'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming we need to step up one more directory
path = os.path.join(os.path.split(os.path.split(sys.path[0])[0])[0],
'mpl-data')
if os.path.isdir(path): return path
else:
# Try again assuming sys.path[0] is a dir not a exe
path = os.path.join(sys.path[0], 'mpl-data')
if os.path.isdir(path): return path
raise RuntimeError('Could not find the matplotlib data files')
def _get_data_path_cached():
if defaultParams['datapath'][0] is None:
defaultParams['datapath'][0] = _get_data_path()
return defaultParams['datapath'][0]
get_data_path = verbose.wrap('matplotlib data path %s', _get_data_path_cached,
always=False)
def get_example_data(fname):
"""
return a filehandle to one of the example files in mpl-data/example
*fname*
the name of one of the files in mpl-data/example
"""
datadir = os.path.join(get_data_path(), 'example')
fullpath = os.path.join(datadir, fname)
if not os.path.exists(fullpath):
raise IOError('could not find matplotlib example file "%s" in data directory "%s"'%(
fname, datadir))
return file(fullpath, 'rb')
def get_py2exe_datafiles():
datapath = get_data_path()
head, tail = os.path.split(datapath)
d = {}
for root, dirs, files in os.walk(datapath):
        # Need to explicitly remove cocoa_agg files or py2exe complains
        # NOTE I don't know why, but do as the previous version did
if 'Matplotlib.nib' in files:
files.remove('Matplotlib.nib')
files = [os.path.join(root, filename) for filename in files]
root = root.replace(tail, 'mpl-data')
root = root[root.index('mpl-data'):]
d[root] = files
return d.items()
def matplotlib_fname():
"""
Return the path to the rc file
Search order:
* current working dir
* environ var MATPLOTLIBRC
* HOME/.matplotlib/matplotlibrc
* MATPLOTLIBDATA/matplotlibrc
"""
oldname = os.path.join( os.getcwd(), '.matplotlibrc')
if os.path.exists(oldname):
print >> sys.stderr, """\
WARNING: Old rc filename ".matplotlibrc" found in working dir
  and renamed to the new default rc file name "matplotlibrc"
(no leading "dot"). """
shutil.move('.matplotlibrc', 'matplotlibrc')
home = get_home()
oldname = os.path.join( home, '.matplotlibrc')
if os.path.exists(oldname):
configdir = get_configdir()
newname = os.path.join(configdir, 'matplotlibrc')
print >> sys.stderr, """\
WARNING: Old rc filename "%s" found and renamed to
new default rc file name "%s"."""%(oldname, newname)
shutil.move(oldname, newname)
fname = os.path.join( os.getcwd(), 'matplotlibrc')
if os.path.exists(fname): return fname
if 'MATPLOTLIBRC' in os.environ:
path = os.environ['MATPLOTLIBRC']
if os.path.exists(path):
fname = os.path.join(path, 'matplotlibrc')
if os.path.exists(fname):
return fname
fname = os.path.join(get_configdir(), 'matplotlibrc')
if os.path.exists(fname): return fname
path = get_data_path() # guaranteed to exist or raise
fname = os.path.join(path, 'matplotlibrc')
if not os.path.exists(fname):
warnings.warn('Could not find matplotlibrc; using defaults')
return fname
_deprecated_map = {
'text.fontstyle': 'font.style',
'text.fontangle': 'font.style',
'text.fontvariant': 'font.variant',
'text.fontweight': 'font.weight',
'text.fontsize': 'font.size',
'tick.size' : 'tick.major.size',
}
class RcParams(dict):
"""
    A dictionary object including validation.
    Validating functions are defined and associated with rc parameters in
    :mod:`matplotlib.rcsetup`.
"""
validate = dict([ (key, converter) for key, (default, converter) in \
defaultParams.iteritems() ])
def __setitem__(self, key, val):
try:
if key in _deprecated_map.keys():
alt = _deprecated_map[key]
warnings.warn('%s is deprecated in matplotlibrc. Use %s \
instead.'% (key, alt))
key = alt
cval = self.validate[key](val)
dict.__setitem__(self, key, cval)
except KeyError:
raise KeyError('%s is not a valid rc parameter.\
See rcParams.keys() for a list of valid parameters.'%key)
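# Illustrative effect of the deprecation mapping above (the value is made up):
#
#     rcParams['text.fontsize'] = 12.0   # warns, validates, then stores under 'font.size'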
def rc_params(fail_on_error=False):
'Return the default params updated from the values in the rc file'
fname = matplotlib_fname()
if not os.path.exists(fname):
# this should never happen, default in mpl-data should always be found
message = 'could not find rc file; returning defaults'
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
warnings.warn(message)
return ret
cnt = 0
rc_temp = {}
for line in file(fname):
cnt += 1
strippedline = line.split('#',1)[0].strip()
if not strippedline: continue
tup = strippedline.split(':',1)
if len(tup) !=2:
warnings.warn('Illegal line #%d\n\t%s\n\tin file "%s"'%\
(cnt, line, fname))
continue
key, val = tup
key = key.strip()
val = val.strip()
if key in rc_temp:
warnings.warn('Duplicate key in file "%s", line #%d'%(fname,cnt))
rc_temp[key] = (val, line, cnt)
ret = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
for key in ('verbose.level', 'verbose.fileo'):
if key in rc_temp:
val, line, cnt = rc_temp.pop(key)
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
verbose.set_level(ret['verbose.level'])
verbose.set_fileo(ret['verbose.fileo'])
for key, (val, line, cnt) in rc_temp.iteritems():
if key in defaultParams:
if fail_on_error:
ret[key] = val # try to convert to proper type or raise
else:
try: ret[key] = val # try to convert to proper type or skip
except Exception, msg:
warnings.warn('Bad val "%s" on line #%d\n\t"%s"\n\tin file \
"%s"\n\t%s' % (val, cnt, line, fname, msg))
else:
print >> sys.stderr, """
Bad key "%s" on line %d in
%s.
You probably need to get an updated matplotlibrc file from
http://matplotlib.sf.net/_static/matplotlibrc or from the matplotlib source
distribution""" % (key, cnt, fname)
if ret['datapath'] is None:
ret['datapath'] = get_data_path()
if not ret['text.latex.preamble'] == ['']:
verbose.report("""
*****************************************************************
You have the following UNSUPPORTED LaTeX preamble customizations:
%s
Please do not ask for support with these customizations active.
*****************************************************************
"""% '\n'.join(ret['text.latex.preamble']), 'helpful')
verbose.report('loaded rc file %s'%fname)
return ret
# this is the instance used by the matplotlib classes
rcParams = rc_params()
rcParamsDefault = RcParams([ (key, default) for key, (default, converter) in \
defaultParams.iteritems() ])
rcParams['ps.usedistiller'] = checkdep_ps_distiller(rcParams['ps.usedistiller'])
rcParams['text.usetex'] = checkdep_usetex(rcParams['text.usetex'])
def rc(group, **kwargs):
"""
Set the current rc params. Group is the grouping for the rc, eg.
for ``lines.linewidth`` the group is ``lines``, for
``axes.facecolor``, the group is ``axes``, and so on. Group may
also be a list or tuple of group names, eg. (*xtick*, *ytick*).
*kwargs* is a dictionary attribute name/value pairs, eg::
rc('lines', linewidth=2, color='r')
sets the current rc params and is equivalent to::
rcParams['lines.linewidth'] = 2
rcParams['lines.color'] = 'r'
The following aliases are available to save typing for interactive
users:
===== =================
Alias Property
===== =================
'lw' 'linewidth'
'ls' 'linestyle'
'c' 'color'
'fc' 'facecolor'
'ec' 'edgecolor'
'mew' 'markeredgewidth'
'aa' 'antialiased'
===== =================
Thus you could abbreviate the above rc command as::
rc('lines', lw=2, c='r')
Note you can use python's kwargs dictionary facility to store
dictionaries of default parameters. Eg, you can customize the
font rc as follows::
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 'larger'}
rc('font', **font) # pass in the font dict as kwargs
This enables you to easily switch between several configurations.
Use :func:`~matplotlib.pyplot.rcdefaults` to restore the default
rc params after changes.
"""
aliases = {
'lw' : 'linewidth',
'ls' : 'linestyle',
'c' : 'color',
'fc' : 'facecolor',
'ec' : 'edgecolor',
'mew' : 'markeredgewidth',
'aa' : 'antialiased',
}
if is_string_like(group):
group = (group,)
for g in group:
for k,v in kwargs.items():
name = aliases.get(k) or k
key = '%s.%s' % (g, name)
if key not in rcParams:
raise KeyError('Unrecognized key "%s" for group "%s" and name "%s"' %
(key, g, name))
rcParams[key] = v
def rcdefaults():
"""
Restore the default rc params - the ones that were created at
matplotlib load time.
"""
rcParams.update(rcParamsDefault)
if NEWCONFIG:
#print "importing from reorganized config system!"
try:
from config import rcParams, rcdefaults, mplConfig, save_config
verbose.set_level(rcParams['verbose.level'])
verbose.set_fileo(rcParams['verbose.fileo'])
except:
from config import rcParams, rcdefaults
_use_error_msg = """ This call to matplotlib.use() has no effect
because the backend has already been chosen;
matplotlib.use() must be called *before* pylab, matplotlib.pyplot,
or matplotlib.backends is imported for the first time.
"""
def use(arg, warn=True):
"""
Set the matplotlib backend to one of the known backends.
The argument is case-insensitive. For the Cairo backend,
the argument can have an extension to indicate the type of
output. Example:
use('cairo.pdf')
will specify a default of pdf output generated by Cairo.
Note: this function must be called *before* importing pylab for
the first time; or, if you are not using pylab, it must be called
before importing matplotlib.backends. If warn is True, a warning
    is issued if you try to call this after pylab or pyplot have been
    loaded. In certain black magic use cases, e.g.
    pyplot.switch_backends, we are doing the reloading necessary to
    make the backend switch work (in some cases, e.g. pure image
    backends), so one can set warn=False to suppress the warnings
"""
if 'matplotlib.backends' in sys.modules:
if warn: warnings.warn(_use_error_msg)
return
arg = arg.lower()
if arg.startswith('module://'):
name = arg
else:
be_parts = arg.split('.')
name = validate_backend(be_parts[0])
rcParams['backend'] = name
if name == 'cairo' and len(be_parts) > 1:
rcParams['cairo.format'] = validate_cairo_format(be_parts[1])
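# For example, use('Cairo.PDF') above lower-cases the argument, stores the
# validated 'cairo' backend name in rcParams['backend'], and records 'pdf' in
# rcParams['cairo.format'] (both subject to the validators).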
def get_backend():
"Returns the current backend"
return rcParams['backend']
def interactive(b):
"""
Set interactive mode to boolean b.
If b is True, then draw after every plotting command, eg, after xlabel
"""
rcParams['interactive'] = b
def is_interactive():
'Return true if plot mode is interactive'
b = rcParams['interactive']
return b
def tk_window_focus():
"""Return true if focus maintenance under TkAgg on win32 is on.
This currently works only for python.exe and IPython.exe.
Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
if rcParams['backend'] != 'TkAgg':
return False
return rcParams['tk.window_focus']
# Now allow command line to override
# Allow command line access to the backend with -d (MATLAB-compatible
# flag)
for s in sys.argv[1:]:
if s.startswith('-d') and len(s) > 2: # look for a -d flag
try:
use(s[2:])
except (KeyError, ValueError):
pass
# we don't want to assume all -d flags are backends, eg -debug
verbose.report('matplotlib version %s'%__version__)
verbose.report('verbose.level %s'%verbose.level)
verbose.report('interactive is %s'%rcParams['interactive'])
verbose.report('units is %s'%rcParams['units'])
verbose.report('platform is %s'%sys.platform)
verbose.report('loaded modules: %s'%sys.modules.keys(), 'debug')
| agpl-3.0 |
macks22/scikit-learn | sklearn/linear_model/randomized_l1.py | 95 | 23365 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
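# Note on the contract assumed by _resample_model above: estimator_func(X, y,
# weights=..., mask=..., **params) must return a per-feature boolean "active
# set"; averaging those indicator arrays over n_resampling runs yields the
# stability scores in [0, 1].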
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
    # XXX: the two functions below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
    # Center X and y to avoid fitting the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
        Warning: this is not the alpha parameter in the stability selection
        article; that role is played by ``scaling``.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
    selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
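# A minimal usage sketch for RandomizedLasso (synthetic data; parameter values
# are illustrative, not recommendations):
#
#     from sklearn.datasets import make_regression
#     X, y = make_regression(n_samples=100, n_features=20, n_informative=5,
#                            random_state=0)
#     rl = RandomizedLasso(alpha='aic', n_resampling=50, random_state=0).fit(X, y)
#     stable = rl.get_support(indices=True)   # indices of features above the threshold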
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
    Randomized Logistic Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
    # Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
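# Rough usage sketch (X, y as described in the docstring above; values are
# illustrative):
#
#     alphas_grid, scores_path = lasso_stability_path(X, y, n_resampling=100,
#                                                     random_state=0)
#     # scores_path[j, k] is the fraction of resamplings in which feature j was
#     # selected at the grid point alphas_grid[k] (alpha / alpha_max).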
| bsd-3-clause |
PatrickChrist/scikit-learn | sklearn/pipeline.py | 162 | 21103 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
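    # Illustrative routing of step-scoped fit parameters handled above
    # (step and parameter names are hypothetical):
    #
    #     pipe = Pipeline([('vec', SomeVectorizer()), ('clf', SomeClassifier())])
    #     pipe.fit(X, y, clf__sample_weight=w)   # split on '__' and forwarded to
    #                                            # the final 'clf' step's fit()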
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
        reverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
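# A minimal usage sketch of the methods above: every intermediate step's
# transform() is applied, then the call is delegated to the final estimator.
# The dataset and estimator choices here are illustrative assumptions only.
def _example_pipeline_usage():
    from sklearn.datasets import load_iris
    from sklearn.preprocessing import StandardScaler
    from sklearn.linear_model import LogisticRegression
    iris = load_iris()
    model = Pipeline([('scale', StandardScaler()),
                      ('clf', LogisticRegression())])
    # fit: StandardScaler.fit_transform, then LogisticRegression.fit
    model.fit(iris.data, iris.target)
    # predict: StandardScaler.transform, then LogisticRegression.predict
    return model.predict(iris.data)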
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
        List of transformer objects to be applied to the data. The first
        element of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
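# A minimal usage sketch of FeatureUnion: each transformer is fitted on the
# same input and the outputs are stacked column-wise; transformer_weights
# rescales individual blocks via the _transform_one/_fit_transform_one
# helpers defined above. The transformer choices here are illustrative only.
def _example_feature_union_usage():
    from sklearn.datasets import load_iris
    from sklearn.decomposition import PCA
    from sklearn.feature_selection import SelectKBest
    iris = load_iris()
    union = FeatureUnion([('pca', PCA(n_components=2)),
                          ('kbest', SelectKBest(k=1))],
                         transformer_weights={'pca': 10.0})
    # 2 PCA components + 1 selected feature -> shape (150, 3)
    return union.fit_transform(iris.data, iris.target)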
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
yaojenkuo/BuildingMachineLearningSystemsWithPython | ch06/02_tuning.py | 22 | 5484 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script tries to tweak hyperparameters to improve P/R AUC
#
import time
start_time = time.time()
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import f1_score
from sklearn.naive_bayes import MultinomialNB
phase = "02"
def create_ngram_model(params=None):
tfidf_ngrams = TfidfVectorizer(ngram_range=(1, 3),
analyzer="word", binary=False)
clf = MultinomialNB()
pipeline = Pipeline([('vect', tfidf_ngrams), ('clf', clf)])
if params:
pipeline.set_params(**params)
return pipeline
def grid_search_model(clf_factory, X, Y):
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, random_state=0)
param_grid = dict(vect__ngram_range=[(1, 1), (1, 2), (1, 3)],
vect__min_df=[1, 2],
vect__stop_words=[None, "english"],
vect__smooth_idf=[False, True],
vect__use_idf=[False, True],
vect__sublinear_tf=[False, True],
vect__binary=[False, True],
clf__alpha=[0, 0.01, 0.05, 0.1, 0.5, 1],
)
grid_search = GridSearchCV(clf_factory(),
param_grid=param_grid,
cv=cv,
score_func=f1_score,
verbose=10)
grid_search.fit(X, Y)
clf = grid_search.best_estimator_
print(clf)
return clf
def train_model(clf, X, Y, name="NB ngram", plot=False):
# create it again for plotting
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, indices=True, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf.fit(X_train, y_train)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
if plot:
scores_to_sort = pr_scores
median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
plot_pr(pr_scores[median], name, phase, precisions[median],
recalls[median], label=name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in range(len(X_wrong)):
print("clf.predict('%s')=%i instead of %i" %
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx]))
def get_best_model():
best_params = dict(vect__ngram_range=(1, 2),
vect__min_df=1,
vect__stop_words=None,
vect__smooth_idf=False,
vect__use_idf=False,
vect__sublinear_tf=True,
vect__binary=False,
clf__alpha=0.01,
)
best_clf = create_ngram_model(best_params)
return best_clf
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
classes = np.unique(Y_orig)
for c in classes:
print("#%s: %i" % (c, sum(Y_orig == c)))
print("== Pos vs. neg ==")
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print("== Pos/neg vs. irrelevant/neutral ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
# best_clf = grid_search_model(create_ngram_model, X, Y, name="sent vs
# rest", plot=True)
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print("== Pos vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs rest",
plot=True)
print("== Neg vs. rest ==")
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(get_best_model(), X, Y, name="neg vs rest",
plot=True)
print("time spent:", time.time() - start_time)
| mit |
araichev/gtfstk | tests/test_cleaners.py | 1 | 4095 | from pandas.util.testing import assert_frame_equal, assert_series_equal
import numpy as np
from .context import gtfstk, sample
from gtfstk import *
def test_clean_column_names():
f = sample.routes.copy()
g = clean_column_names(f)
assert_frame_equal(f, g)
f = sample.routes.copy()
f[" route_id "] = f["route_id"].copy()
del f["route_id"]
g = clean_column_names(f)
assert "route_id" in g.columns
assert " route_id " not in g.columns
def test_clean_ids():
f1 = sample.copy()
f1.routes.loc[0, "route_id"] = " ho ho ho "
f2 = clean_ids(f1)
expect_rid = "ho_ho_ho"
    assert f2.routes.loc[0, "route_id"] == expect_rid
f3 = clean_ids(f2)
    assert f3 == f2
def test_clean_times():
f1 = sample.copy()
f1.stop_times["departure_time"].iat[0] = "7:00:00"
f1.frequencies["start_time"].iat[0] = "7:00:00"
f2 = clean_times(f1)
assert f2.stop_times["departure_time"].iat[0] == "07:00:00"
assert f2.frequencies["start_time"].iat[0] == "07:00:00"
def test_clean_route_short_names():
f1 = sample.copy()
# Should have no effect on a fine feed
f2 = clean_route_short_names(f1)
assert_series_equal(
f2.routes["route_short_name"], f1.routes["route_short_name"]
)
# Make route short name duplicates
f1.routes.loc[1:5, "route_short_name"] = np.nan
f1.routes.loc[6:, "route_short_name"] = " he llo "
f2 = clean_route_short_names(f1)
# Should have unique route short names
assert f2.routes["route_short_name"].nunique() == f2.routes.shape[0]
# NaNs should be replaced by n/a and route IDs
expect_rsns = ("n/a-" + sample.routes.iloc[1:5]["route_id"]).tolist()
assert (
f2.routes.iloc[1:5]["route_short_name"].values.tolist() == expect_rsns
)
# Should have names without leading or trailing whitespace
assert not f2.routes["route_short_name"].str.startswith(" ").any()
assert not f2.routes["route_short_name"].str.endswith(" ").any()
def test_drop_zombies():
# Should have no effect on sample feed
f1 = sample.copy()
f2 = drop_zombies(f1)
assert_frame_equal(f2.routes, f1.routes)
# Should drop stops with no stop times
f1 = sample.copy()
f1.stops["location_type"] = np.nan
stop_id = f1.stops.stop_id.iat[0]
st = f1.stop_times.copy()
st = st.loc[lambda x: x.stop_id != stop_id]
f1.stop_times = st
f2 = drop_zombies(f1)
assert not stop_id in f2.stops.stop_id.values
f2 = drop_zombies(f1)
assert_frame_equal(f2.routes, f1.routes)
# Create undefined parent stations
f1 = sample.copy()
f1.stops["parent_station"] = "bingo"
f2 = drop_zombies(f1)
assert f2.stops.parent_station.isna().all()
# Create all zombie trips for one route
rid = f1.routes["route_id"].iat[0]
cond = f1.trips["route_id"] == rid
f1.trips.loc[cond, "trip_id"] = "hoopla"
f2 = drop_zombies(f1)
# Trips should be gone
assert "hoopla" not in f2.trips["trip_id"]
# Route should be gone
assert rid not in f2.routes["route_id"]
def test_aggregate_routes():
feed1 = sample.copy()
# Equalize all route short names
feed1.routes["route_short_name"] = "bingo"
feed2 = aggregate_routes(feed1)
# feed2 should have only one route ID
assert feed2.routes.shape[0] == 1
# Feeds should have same trip data frames excluding
# route IDs
feed1.trips["route_id"] = feed2.trips["route_id"]
assert almost_equal(feed1.trips, feed2.trips)
# Feeds should have equal attributes excluding
# routes and trips data frames
feed2.routes = feed1.routes
feed2.trips = feed1.trips
assert feed1 == feed2
def test_clean():
f1 = sample.copy()
rid = f1.routes["route_id"].iat[0]
f1.routes["route_id"].iat[0] = " " + rid + " "
f2 = clean(f1)
assert f2.routes["route_id"].iat[0] == rid
assert_frame_equal(f2.trips, sample.trips)
def test_drop_invalid_columns():
f1 = sample.copy()
f1.routes["bingo"] = "bongo"
f1.trips["wingo"] = "wongo"
f2 = drop_invalid_columns(f1)
assert f2 == sample
| mit |
openp2pdesign/PyMakerspaces | makerlabs/fablabs_io.py | 1 | 9388 | # -*- encoding: utf-8 -*-
#
# Access data from fablabs.io
#
# Author: Massimo Menichinelli
# Homepage: http://www.openp2pdesign.org
# License: LGPL v.3
#
#
from classes import Lab
import json
import requests
from geojson import dumps, Feature, Point, FeatureCollection
from geopy.geocoders import Nominatim
import pycountry
from pycountry_convert import country_alpha2_to_continent_code
from time import sleep
import pandas as pd
# Geocoding variable
geolocator = Nominatim()
# Endpoints
fablabs_io_labs_api_url_v0 = "https://api.fablabs.io/v0/labs.json"
fablabs_io_projects_api_url_v0 = "https://api.fablabs.io/v0/projects.json"
class FabLab(Lab):
"""Represents a Fab Lab as it is described on fablabs.io."""
def __init__(self):
self.source = "fablabs.io"
self.lab_type = "Fab Lab"
class Project(object):
"""Represents a project as it is described on fablabs.io."""
def __init__(self):
self.id = ""
self.title = ""
self.description = ""
self.github = ""
self.web = ""
self.dropbox = ""
self.bitbucket = ""
self.lab_id = ""
self.lab = ""
self.owner_id = ""
self.created_at = ""
self.updated_at = ""
self.vimeo = ""
self.flickr = ""
self.youtube = ""
self.drive = ""
self.twitter = ""
self.facebook = ""
self.googleplus = ""
self.instagram = ""
self.status = ""
self.version = ""
self.faq = ""
self.scope = ""
self.community = ""
self.lookingfor = ""
self.cover = ""
self.type = "Project in a Fab Lab"
def data_from_fablabs_io(endpoint):
"""Gets data from fablabs.io."""
data = requests.get(endpoint).json()
return data
def get_labs(format):
"""Gets Fab Lab data from fablabs.io."""
fablabs_json = data_from_fablabs_io(fablabs_io_labs_api_url_v0)
fablabs = {}
# Load all the FabLabs
for i in fablabs_json["labs"]:
current_lab = FabLab()
current_lab.name = i["name"]
current_lab.address_1 = i["address_1"]
current_lab.address_2 = i["address_2"]
current_lab.address_notes = i["address_notes"]
current_lab.avatar = i["avatar_url"]
current_lab.blurb = i["blurb"]
current_lab.capabilities = i["capabilities"]
if i["city"].isupper():
i["city"] = i["city"].title()
current_lab.city = i["city"]
current_lab.country_code = i["country_code"]
current_lab.county = i["county"]
current_lab.description = i["description"]
current_lab.email = i["email"]
current_lab.id = i["id"]
current_lab.phone = i["phone"]
current_lab.postal_code = i["postal_code"]
current_lab.slug = i["slug"]
current_lab.url = i["url"]
current_lab.continent = country_alpha2_to_continent_code(i["country_code"].upper())
current_country = pycountry.countries.get(alpha_2=i["country_code"].upper())
current_lab.country_code = current_country.alpha_3
current_lab.country = current_country.name
# Check coordinates
if i["longitude"] is not None:
current_lab.longitude = i["longitude"]
else:
current_lab.longitude = 0.0
if i["latitude"] is not None:
current_lab.latitude = i["latitude"]
else:
current_lab.latitude = 0.0
# Find Facebook and Twitter links, add also the other ones
current_lab.links = {"facebook": "", "twitter": ""}
for link in i["links"]:
if "facebook" in link["url"]:
current_lab.links["facebook"] = link["url"]
elif "twitter" in link["url"]:
current_lab.links["twitter"] = link["url"]
else:
current_lab.links[link["id"]] = link["url"]
# Add the lab to the list
fablabs[i["slug"]] = current_lab
    # Return a dictionary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in fablabs:
output[j] = fablabs[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in fablabs:
single = fablabs[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in fablabs:
output[j] = fablabs[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = fablabs
    # Default: return an object
else:
output = fablabs
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
def labs_count():
"""Gets the number of current Fab Labs registered on fablabs.io."""
fablabs = data_from_fablabs_io(fablabs_io_labs_api_url_v0)
return len(fablabs["labs"])
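# A minimal usage sketch of get_labs(): the format argument selects between
# "dict"/"json", "geojson"/"geo", "pandas"/"dataframe" and "object"/"obj".
# These calls query the live fablabs.io API, so network access is assumed.
def _example_get_labs_usage():
    labs_df = get_labs(format="pandas")        # one row per lab, keyed by slug
    labs_geojson = get_labs(format="geojson")  # GeoJSON FeatureCollection string
    return labs_df.shape, len(labs_geojson)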
def get_projects(format):
"""Gets projects data from fablabs.io."""
projects_json = data_from_fablabs_io(fablabs_io_projects_api_url_v0)
projects = {}
project_url = "https://www.fablabs.io/projects/"
fablabs = get_labs(format="object")
    # Load all the projects
for i in projects_json["projects"]:
i = i["projects"]
current_project = Project()
current_project.id = i["id"]
current_project.title = i["title"]
current_project.description = i["description"]
current_project.github = i["github"]
current_project.web = i["web"]
current_project.dropbox = i["dropbox"]
current_project.bitbucket = i["bitbucket"]
current_project.lab_id = i["lab_id"]
# Add the lab of the project
if i["lab_id"] is not None:
for k in fablabs:
if fablabs[k].id == i["lab_id"]:
current_project.lab = fablabs[k]
else:
current_project.lab = None
current_project.owner_id = i["owner_id"]
current_project.created_at = i["created_at"]
current_project.updated_at = i["updated_at"]
current_project.vimeo = i["vimeo"]
current_project.flickr = i["flickr"]
current_project.youtube = i["youtube"]
current_project.drive = i["drive"]
current_project.twitter = i["twitter"]
current_project.facebook = i["facebook"]
current_project.googleplus = i["googleplus"]
current_project.instagram = i["instagram"]
current_project.status = i["status"]
current_project.version = i["version"]
current_project.faq = i["faq"]
current_project.scope = i["scope"]
current_project.community = i["community"]
current_project.lookingfor = i["lookingfor"]
current_project.cover = i["cover"]
url = project_url + str(current_project.id)
current_project.url = url
# Add the project
projects[current_project.id] = current_project
    # Return a dictionary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in projects:
project_dict = projects[j].__dict__
# Convert the lab from a Fab Lab object to a dict
if project_dict["lab"] is not None:
project_dict["lab"] = project_dict["lab"].__dict__
output[j] = project_dict
# Return a geojson, only for projects linked to a lab
elif format.lower() == "geojson" or format.lower() == "geo":
projects_list = []
for p in projects:
if projects[p].lab_id is not None:
single_project = projects[p].__dict__
if projects[p].lab is not None:
single_project["lab"] = single_project["lab"].__dict__
for l in fablabs:
single_lab = fablabs[l].__dict__
if single_lab["id"] == single_project["lab_id"]:
project_lab = Feature(
type="Feature",
geometry=Point((single_lab["latitude"],
single_lab["longitude"])),
properties=single_project)
projects_list.append(project_lab)
output = dumps(FeatureCollection(projects_list))
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = projects
# Default: return an object
else:
output = projects
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output
def projects_count():
"""Gets the number of current projects submitted on fablabs.io."""
projects = data_from_fablabs_io(fablabs_io_projects_api_url_v0)
return len(projects["projects"])
if __name__ == "__main__":
print get_labs(format="json")
| lgpl-3.0 |
dclambert/pyensemble | model_library.py | 3 | 5363 | # -*- coding: utf8
# Author: David C. Lambert [dcl -at- panix -dot- com]
# Copyright(c) 2013
# License: Simple BSD
"""Utility module for building model library"""
from __future__ import print_function
import numpy as np
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_random_state
from sklearn.cluster import KMeans
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.pipeline import Pipeline
from sklearn.grid_search import ParameterGrid
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.kernel_approximation import Nystroem
# generic model builder
def build_models(model_class, param_grid):
print('Building %s models' % str(model_class).split('.')[-1][:-2])
return [model_class(**p) for p in ParameterGrid(param_grid)]
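# A minimal sketch of how build_models expands a grid: two C values times two
# kernels below yield four SVC instances. The grid values are illustrative.
def _example_build_models():
    grid = {'C': [0.1, 1.0],
            'kernel': ['linear', 'rbf'],
            'probability': [True]}
    return build_models(SVC, grid)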
def build_randomForestClassifiers(random_state=None):
param_grid = {
'n_estimators': [20, 50, 100],
'criterion': ['gini', 'entropy'],
'max_features': [None, 'auto', 'sqrt', 'log2'],
'max_depth': [1, 2, 5, 10],
'min_density': [0.25, 0.5, 0.75, 1.0],
'random_state': [random_state],
}
return build_models(RandomForestClassifier, param_grid)
def build_gradientBoostingClassifiers(random_state=None):
param_grid = {
'max_depth': [1, 2, 5, 10],
'n_estimators': [10, 20, 50, 100],
'subsample': np.linspace(0.2, 1.0, 5),
'max_features': np.linspace(0.2, 1.0, 5),
}
return build_models(GradientBoostingClassifier, param_grid)
def build_sgdClassifiers(random_state=None):
param_grid = {
'loss': ['log', 'modified_huber'],
'penalty': ['elasticnet'],
'alpha': [0.0001, 0.001, 0.01, 0.1],
'learning_rate': ['constant', 'optimal'],
'n_iter': [2, 5, 10],
'eta0': [0.001, 0.01, 0.1],
'l1_ratio': np.linspace(0.0, 1.0, 3),
}
return build_models(SGDClassifier, param_grid)
def build_decisionTreeClassifiers(random_state=None):
rs = check_random_state(random_state)
param_grid = {
'criterion': ['gini', 'entropy'],
'max_features': [None, 'auto', 'sqrt', 'log2'],
'max_depth': [None, 1, 2, 5, 10],
'min_samples_split': [1, 2, 5, 10],
'random_state': [rs.random_integers(100000) for i in xrange(3)],
}
return build_models(DecisionTreeClassifier, param_grid)
def build_extraTreesClassifiers(random_state=None):
param_grid = {
'criterion': ['gini', 'entropy'],
'n_estimators': [5, 10, 20],
'max_features': [None, 'auto', 'sqrt', 'log2'],
'max_depth': [None, 1, 2, 5, 10],
'min_samples_split': [2, 5, 10],
'random_state': [random_state],
}
return build_models(ExtraTreesClassifier, param_grid)
def build_svcs(random_state=None):
print('Building SVM models')
Cs = np.logspace(-7, 2, 10)
gammas = np.logspace(-6, 2, 9, base=2)
coef0s = [-1.0, 0.0, 1.0]
models = []
for C in Cs:
models.append(SVC(kernel='linear', C=C, probability=True,
cache_size=1000))
for C in Cs:
for coef0 in coef0s:
models.append(SVC(kernel='sigmoid', C=C, coef0=coef0,
probability=True, cache_size=1000))
for C in Cs:
for gamma in gammas:
models.append(SVC(kernel='rbf', C=C, gamma=gamma,
cache_size=1000, probability=True))
param_grid = {
'kernel': ['poly'],
'C': Cs,
'gamma': gammas,
'degree': [2],
'coef0': coef0s,
'probability': [True],
'cache_size': [1000],
}
for params in ParameterGrid(param_grid):
models.append(SVC(**params))
return models
def build_kernPipelines(random_state=None):
print('Building Kernel Approximation Pipelines')
param_grid = {
'n_components': xrange(5, 105, 5),
'gamma': np.logspace(-6, 2, 9, base=2)
}
models = []
for params in ParameterGrid(param_grid):
nys = Nystroem(**params)
lr = LogisticRegression()
models.append(Pipeline([('nys', nys), ('lr', lr)]))
return models
def build_kmeansPipelines(random_state=None):
print('Building KMeans-Logistic Regression Pipelines')
param_grid = {
'n_clusters': xrange(5, 205, 5),
'init': ['k-means++', 'random'],
'n_init': [1, 2, 5, 10],
'random_state': [random_state],
}
models = []
for params in ParameterGrid(param_grid):
km = KMeans(**params)
lr = LogisticRegression()
models.append(Pipeline([('km', km), ('lr', lr)]))
return models
models_dict = {
'svc': build_svcs,
'sgd': build_sgdClassifiers,
'gbc': build_gradientBoostingClassifiers,
'dtree': build_decisionTreeClassifiers,
'forest': build_randomForestClassifiers,
'extra': build_extraTreesClassifiers,
'kmp': build_kmeansPipelines,
'kernp': build_kernPipelines,
}
def build_model_library(model_types=['dtree'], random_seed=None):
models = []
for m in model_types:
models.extend(models_dict[m](random_state=random_seed))
return models
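# Minimal usage sketch: assemble a library from a subset of the registered
# builders. The model types and seed below are illustrative; the number of
# models returned depends on the parameter grids defined above.
if __name__ == '__main__':
    library = build_model_library(model_types=['dtree', 'forest'],
                                  random_seed=42)
    print('built %d models' % len(library))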
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | preprocessing/filterBank.py | 4 | 1401 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 21 17:24:01 2015.
@author: fornax, alexandre
"""
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from scipy.signal import butter, lfilter
class FilterBank(BaseEstimator, TransformerMixin):
"""Filterbank TransformerMixin.
Return signal processed by a bank of butterworth filters.
"""
def __init__(self, filters='LowpassBank'):
"""init."""
if filters == 'LowpassBank':
self.freqs_pairs = [[0.5], [1], [2], [3], [4], [5], [7], [9], [15],
[30]]
else:
self.freqs_pairs = filters
self.filters = filters
def fit(self, X, y=None):
"""Fit Method, Not used."""
return self
def transform(self, X, y=None):
"""Transform. Apply filters."""
X_tot = None
for freqs in self.freqs_pairs:
if len(freqs) == 1:
b, a = butter(5, freqs[0] / 250.0, btype='lowpass')
else:
if freqs[1] - freqs[0] < 3:
b, a = butter(3, np.array(freqs) / 250.0, btype='bandpass')
else:
b, a = butter(5, np.array(freqs) / 250.0, btype='bandpass')
X_filtered = lfilter(b, a, X, axis=0)
X_tot = X_filtered if X_tot is None else np.c_[X_tot, X_filtered]
return X_tot
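# Minimal usage sketch (illustrative data): filter a random two-channel signal
# with the default lowpass bank. Each input channel yields one output column
# per filter, so 10 filters x 2 channels gives 20 columns.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    signal = rng.randn(1000, 2)            # (n_samples, n_channels)
    fb = FilterBank()                      # default 'LowpassBank'
    filtered = fb.fit(signal).transform(signal)
    print(filtered.shape)                  # (1000, 20)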
| bsd-3-clause |
cl4rke/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) does not necessarily be classified
into the region it is lying in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for the training times
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
wiheto/teneto | teneto/networkmeasures/shortest_temporal_path.py | 1 | 10473 | """Functions to calculate the shortest temporal path."""
import numpy as np
from teneto.utils import process_input
import itertools
import pandas as pd
def seqpath_to_path(pairseq, source):
# seq must be a path sequence (i.e. possible paths per timepoint)
# convert the sequence of pairs to a n x 2 array
pairrows = np.reshape(pairseq, [int(len(pairseq)/2), 2])
queue = [(0, [0])]
# if source is in the first tuple, return
if source in pairrows[0]:
yield [pairrows[0].tolist()]
while queue:
# Set the queue
(node, path) = queue.pop(0)
# Get all remaining possible paths in sequence
iterset = set(np.where((pairrows == pairrows[node, 0]) | (
pairrows == pairrows[node, 1]))[0]) - set(range(node+1))
for nextset in iterset:
if source in pairrows[nextset]:
yield list(reversed(pairrows[path + [nextset]].tolist()))
else:
queue.append((nextset, path + [nextset]))
def shortest_path_from_pairseq(pairseq, source):
try:
return next(seqpath_to_path(pairseq, source))
except StopIteration:
return None
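# A small worked sketch of the helpers above: the pair sequence is ordered
# from the target backwards, and is unpacked into an edge path that reaches
# the source (or None if no such path exists). The values are illustrative.
def _example_pairseq():
    pairseq = np.array([2, 3, 1, 2, 0, 1])
    # returns [[0, 1], [1, 2], [2, 3]]
    return shortest_path_from_pairseq(pairseq, source=0)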
def shortest_temporal_path(tnet, steps_per_t='all', i=None, j=None, it=None, minimise='temporal_distance'):
"""
Shortest temporal path
Parameters
--------------
tnet : tnet obj, array or dict
input network. nettype: bu, bd.
steps_per_t : int or str
If str, should be 'all'.
How many edges can be travelled during a single time-point.
i : list
List of node indicies to restrict analysis. These are nodes the paths start from. Default is all nodes.
j : list
List of node indicies to restrict analysis. There are nodes the paths end on. Default is all nodes.
it : None, int, list
Time points for parts.
Either None (default) which takes all time points,
an integer to indicate which time point to start at,
or a list of time-points that is included in analysis
(including end time-point).
minimise : str
        Currently only "temporal_distance" is supported: the path with the
        smallest temporal distance is returned. A path with a smaller
        topological distance may exist, but minimising topological distance
        is not yet available.
Returns
-------------------
paths : pandas df
Dataframe consisting of information about all the paths found.
Notes
---------------
    The shortest temporal path calculates the temporal and topological distance required for there to be a path between nodes.
The argument steps_per_t allows for multiple nodes to be travelled per time-point.
Topological distance is the number of edges that are travelled. Temporal distance is the number of time-points.
This function returns the path that is the shortest temporal distance away.
Examples
--------
Let us start by creating a small network.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import teneto
>>> G = np.zeros([4, 4, 3])
>>> G[0, 1, [0, 2]] = 1
>>> G[0, 3, [2]] = 1
>>> G[1, 2, [1]] = 1
>>> G[2, 3, [1]] = 1
Let us look at this network to see what is there.
>>> fig, ax = plt.subplots(1)
>>> ax = teneto.plot.slice_plot(G, ax, nodelabels=[0,1,2,3], timelabels=[0,1,2], cmap='Set2')
>>> plt.tight_layout()
>>> fig.show()
.. plot::
import numpy as np
import matplotlib.pyplot as plt
import teneto
G = np.zeros([4, 4, 3])
G[0, 1, [0, 2]] = 1
G[0, 3, [2]] = 1
G[1, 2, [1]] = 1
G[2, 3, [1]] = 1
fig,ax = plt.subplots(1)
teneto.plot.slice_plot(G,ax,nodelabels=[0,1,2,3],timelabels=[0,1,2],cmap='Set2')
plt.tight_layout()
fig.show()
Here we can visualize what the shortest paths are.
Let us start by starting at
node 0 we want to find the path to node 3, starting at time 0. To do this we write:
>>> sp = teneto.networkmeasures.shortest_temporal_path(G, i=0, j=3, it=0)
>>> sp['temporal-distance']
0 2
Name: temporal-distance, dtype: int64
>>> sp['topological-distance']
0 3
Name: topological-distance, dtype: int64
>>> sp['path includes']
0 [[0, 1], [1, 2], [2, 3]]
Name: path includes, dtype: object
Here we see that the shortest path takes 3 steps (topological distance of 3) at 2 time points.
It starts by going from node 0 to 1 at t=0, then 1 to 2 and 2 to 3 at t=1.
We can see all the nodes
that were travelled in the "path includes" list.
In the above example, it was possible to traverse multiple edges at a single time-point.
It is possible to restrain that by setting the steps_per_t argument
>>> sp = teneto.networkmeasures.shortest_temporal_path(G, i=0, j=3, it=0, steps_per_t=1)
>>> sp['temporal-distance']
0 3
Name: temporal-distance, dtype: int64
>>> sp['topological-distance']
0 1
Name: topological-distance, dtype: int64
>>> sp['path includes']
0 [[0, 3]]
Name: path includes, dtype: object
Here we see that the path is now only one edge, 0 to 3 at t=2.
The quicker path is no longer possible.
"""
tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
# If i, j or it are inputs, process them
if i is None:
source_nodes = np.arange(tnet.netshape[0])
elif isinstance(i, int):
source_nodes = [i]
elif isinstance(i, list):
source_nodes = i
else:
raise ValueError('Unknown i input. Should be None, int or list')
if j is None:
target_nodes = np.arange(tnet.netshape[0])
elif isinstance(j, int):
target_nodes = [j]
elif isinstance(j, list):
target_nodes = j
else:
raise ValueError('Unknown j input. Should be None, int or list')
if it is None:
time_points = np.arange(tnet.netshape[1])
elif isinstance(it, int):
time_points = [it]
elif isinstance(it, list):
time_points = it
else:
raise ValueError('Unknown t input. Should be None, int or list')
# Two step process.
# First, get what the network can reach per timepoint.
# Second, check all possible sequences of what the network can reach for the shortest sequence.
paths = []
for source in source_nodes:
for target in target_nodes:
if target == source:
pass
else:
for tstart in time_points:
# Part 1 starts here
ij = [source]
t = tstart
step = 1
lenij = 1
pairs = []
stop = 0
while stop == 0:
# Only select i if directed, ij if undirected.
if tnet.nettype[1] == 'u':
network = tnet.get_network_when(ij=list(ij), t=t)
elif tnet.nettype[1] == 'd':
network = tnet.get_network_when(i=list(ij), t=t)
new_nodes = network[['i', 'j']].values
if len(new_nodes) != 0:
pairs.append(new_nodes.tolist())
new_nodes = new_nodes.flatten()
ij = np.hstack([ij, new_nodes])
ij = np.unique(ij)
if minimise == 'temporal_distance' and target in ij:
stop = 1
elif minimise == 'topology' and t == tnet.netshape[1] and target in ij:
stop = 1
elif t == tnet.netshape[1]:
t = np.nan
ij = [target]
stop = 1
else:
if len(ij) == lenij:
t += 1
step = 1
elif steps_per_t == 'all':
pass
elif step < steps_per_t:
step += 1
else:
t += 1
step = 1
if t == tnet.netshape[1]:
t = np.nan
ij = [target]
stop = 1
lenij = len(ij)
# correct t for return
# Only run if one pair is added.
t += 1
# part 2 starts here
path = np.nan
path_length = np.nan
for n in itertools.product(*reversed(pairs)):
a = np.array(n).flatten()
if source not in a or target not in a:
pass
else:
pathtmp = shortest_path_from_pairseq(a, source)
if pathtmp:
if not isinstance(path, list):
path = pathtmp
path_length = len(path)
elif len(pathtmp) < path_length:
path = pathtmp
path_length = len(path)
elif len(pathtmp) == path_length:
if isinstance(path[0][0], list):
if pathtmp in path:
pass
else:
path.append(pathtmp)
else:
if path == pathtmp:
pass
else:
path = [path, pathtmp]
# elif sourcei < 2 and target in a[:2]:
# path_length = 2
paths.append([source, target, tstart, t-tstart, path_length, path])
paths = pd.DataFrame(data=paths, columns=[
'from', 'to', 't_start', 'temporal-distance', 'topological-distance', 'path includes'])
return paths
| gpl-3.0 |
csdms-contrib/1d_hillslope_mcmc | Chain_processing_bestfit.py | 1 | 5261 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 08 13:50:11 2010
@author: Administrator
"""
import numpy as np
import matplotlib.pyplot as plt
import os
from itertools import izip
argmax = lambda array: max(izip(array, xrange(len(array))))[1]
# parameters for histograms
n_hist_bins =100
# bounds of the probability windows
#CDF_interp_points = [0.025,0.05,0.25,0.75,0.95,0.975]
# these are points in the CDF that indicate 2sigma
# 1 sigma, and the mean value of the parameters
CDF_interp_points = [0.023,0.159,0.5,0.841,0.977]
# get the directory path
root=os.getcwd()
#load the chain and rate files
#paramData = np.loadtxt(root+'/test_chain2.txt', unpack=False)
#paramData2 = np.loadtxt(root+'/fixed_chain.chain', unpack=False)
paramData2 = np.loadtxt(root+'/chain2.chain', unpack=False)
dimData = paramData2.shape
n_params = 3
n_cols = dimData[1]
n_runs = dimData[0]
#print "n_cols: "+str(n_cols)+" and n times: "+str(n_runs)
paramData = paramData2[500:n_runs,:]
dimData = paramData.shape
n_cols = dimData[1]
n_runs = dimData[0]
zero_vec = np.zeros(n_runs)
#print "n_cols: "+str(n_cols)+" and n times: "+str(n_runs)
# now for each time slice create a normalized histogram
#weighted by the likliehoods
like_weights = paramData[:,8]
run = paramData[:,0]
tpeak = paramData[:,4]
Upeak = paramData[:,5]
Uwidth = paramData[:,6]
#print "like weights: "
#for i in range (0,len(like_weights)):
# print str(like_weights[i])
#####
###
### All the plotting stuff below is used to just visually
### look for the burn in period
###
fig = plt.figure(1,figsize=(16,10))
ax = fig.add_subplot(311)
ax.fill_between(run, zero_vec, tpeak,
where=None, alpha=0.1, facecolor = 'red',edgecolor='black',
linewidth=2)
title_str = 'T*_{peak} chain, n='+str(n_runs)
plt.title(title_str,size=30)
ax = fig.add_subplot(312)
ax.fill_between(run, zero_vec, Upeak,
where=None, alpha=0.1, facecolor = 'green',edgecolor='black',
linewidth=2)
title_str = 'U*_{peak} chain, n='+str(n_runs)
plt.title(title_str,size=30)
ax = fig.add_subplot(313)
ax.fill_between(run, zero_vec, Uwidth,
where=None, alpha=0.1, facecolor = 'blue',edgecolor='black',
linewidth=2)
title_str = 'U*_{width} chain, n='+str(n_runs)
plt.title(title_str,size=30)
plt.savefig('param_chains.png')
###
###
#####
# create a histogram from the data
# this data is normalized, but it is the
# probability *density* so the integral = 1
tPHist,tPbins=np.histogram(tpeak,bins=n_hist_bins,
range=(0.25,0.4),normed=True,weights=like_weights)
UPHist,UPbins=np.histogram(Upeak,bins=n_hist_bins,
range=(15,25),normed=True,weights=like_weights)
UwHist,Uwbins=np.histogram(Uwidth,bins=n_hist_bins,
range=(0.25,0.4),normed=True,weights=like_weights)
nbins = len(tPbins)
#print "bins "+str(len(tPbins))
#for i in range(0,len(tPbins)):
# print str(tPbins[i])+" "+str(UPbins[i])+" "+str(Uwbins[i])
#print "histogram: "
#for i in range(0,len(tPHist)):
# print str(tPHist[i])+" "+str(UPHist[i])+" "+str(UwHist[i])
# convert into probability
tPt_density = sum(tPHist)
tP_prob = np.divide(tPHist,tPt_density)
UPt_density = sum(UPHist)
UP_prob = np.divide(UPHist,UPt_density)
Uwt_density = sum(UwHist)
Uw_prob = np.divide(UwHist,Uwt_density)
#print "probability"
#for i in range(0,len(tP_prob)):
# print str(tP_prob[i])+ " "+ str(UP_prob[i])+" "+str(Uw_prob[i])
# now get the cumulative density
sz_DHist = tPHist.size
left_bar_tP = np.zeros(sz_DHist)
right_bar_tP = np.zeros(sz_DHist)
midpoint_bar_tP = np.zeros(sz_DHist)
Data_cdf_tP = np.zeros(sz_DHist)
last_val_tP = 0
for i in range(0,sz_DHist):
left_bar_tP[i]=tPbins[i]
right_bar_tP[i]=tPbins[i+1]
midpoint_bar_tP[i] = (left_bar_tP[i]+right_bar_tP[i])/2
Data_cdf_tP[i]=tP_prob[i]+last_val_tP
last_val_tP = Data_cdf_tP[i]
tP_bounds = np.interp(CDF_interp_points,Data_cdf_tP,midpoint_bar_tP)
# now get the cumulative density
sz_DHist = UPHist.size
left_bar_UP = np.zeros(sz_DHist)
right_bar_UP = np.zeros(sz_DHist)
midpoint_bar_UP = np.zeros(sz_DHist)
Data_cdf_UP = np.zeros(sz_DHist)
last_val_UP = 0
for i in range(0,sz_DHist):
left_bar_UP[i]=UPbins[i]
right_bar_UP[i]=UPbins[i+1]
midpoint_bar_UP[i] = (left_bar_UP[i]+right_bar_UP[i])/2
Data_cdf_UP[i]=UP_prob[i]+last_val_UP
last_val_UP = Data_cdf_UP[i]
UP_bounds = np.interp(CDF_interp_points,Data_cdf_UP,midpoint_bar_UP)
# now get the cumulative density
sz_DHist = UwHist.size
left_bar_Uw = np.zeros(sz_DHist)
right_bar_Uw = np.zeros(sz_DHist)
midpoint_bar_Uw = np.zeros(sz_DHist)
Data_cdf_Uw = np.zeros(sz_DHist)
last_val_Uw = 0
for i in range(0,sz_DHist):
left_bar_Uw[i]=Uwbins[i]
right_bar_Uw[i]=Uwbins[i+1]
midpoint_bar_Uw[i] = (left_bar_Uw[i]+right_bar_Uw[i])/2
Data_cdf_Uw[i]=Uw_prob[i]+last_val_Uw
last_val_Uw = Data_cdf_Uw[i]
Uw_bounds = np.interp(CDF_interp_points,Data_cdf_Uw,midpoint_bar_Uw)
n_bounds = len(Uw_bounds)
for i in range(0,n_bounds):
print str(tP_bounds[i])+" "+str(UP_bounds[i])+" "+str(Uw_bounds[i])+" "
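# The three histogram blocks above repeat the same recipe: weighted histogram
# -> normalised probability -> cumulative sum -> interpolate the CDF at the
# 2-sigma / 1-sigma / median points. The helper below is a condensed sketch of
# that recipe (illustrative only; it is not called elsewhere in this script).
def weighted_bounds(samples, weights, bin_range, nbins=n_hist_bins):
    hist, bins = np.histogram(samples, bins=nbins, range=bin_range,
                              normed=True, weights=weights)
    prob = np.divide(hist, sum(hist))
    mids = 0.5 * (bins[:-1] + bins[1:])
    cdf = np.cumsum(prob)
    return np.interp(CDF_interp_points, cdf, mids)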
| gpl-3.0 |
vigilv/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.hold('on')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/util/test_validate_kwargs.py | 2 | 1740 | import pytest
from pandas.util._validators import validate_bool_kwarg, validate_kwargs
_fname = "func"
def test_bad_kwarg():
good_arg = "f"
bad_arg = good_arg + "o"
compat_args = {good_arg: "foo", bad_arg + "o": "bar"}
kwargs = {good_arg: "foo", bad_arg: "bar"}
msg = fr"{_fname}\(\) got an unexpected keyword argument '{bad_arg}'"
with pytest.raises(TypeError, match=msg):
validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("i", range(1, 3))
def test_not_all_none(i):
bad_arg = "foo"
msg = (
fr"the '{bad_arg}' parameter is not supported "
fr"in the pandas implementation of {_fname}\(\)"
)
compat_args = {"foo": 1, "bar": "s", "baz": None}
kwarg_keys = ("foo", "bar", "baz")
kwarg_vals = (2, "s", None)
kwargs = dict(zip(kwarg_keys[:i], kwarg_vals[:i]))
with pytest.raises(ValueError, match=msg):
validate_kwargs(_fname, kwargs, compat_args)
def test_validation():
# No exceptions should be raised.
compat_args = {"f": None, "b": 1, "ba": "s"}
kwargs = dict(f=None, b=1)
validate_kwargs(_fname, kwargs, compat_args)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
def test_validate_bool_kwarg_fail(name, value):
msg = (
f'For argument "{name}" expected type bool, '
f"received type {type(value).__name__}"
)
with pytest.raises(ValueError, match=msg):
validate_bool_kwarg(value, name)
@pytest.mark.parametrize("name", ["inplace", "copy"])
@pytest.mark.parametrize("value", [True, False, None])
def test_validate_bool_kwarg(name, value):
assert validate_bool_kwarg(value, name) == value
| bsd-3-clause |
lyst/lightfm | lightfm/data.py | 1 | 14295 | import array
import numpy as np
import scipy.sparse as sp
import sklearn.preprocessing
class _IncrementalCOOMatrix(object):
def __init__(self, shape, dtype):
if dtype is np.int32:
type_flag = "i"
elif dtype is np.int64:
type_flag = "l"
elif dtype is np.float32:
type_flag = "f"
elif dtype is np.float64:
type_flag = "d"
else:
raise Exception("Dtype not supported.")
self.shape = shape
self.dtype = dtype
self.rows = array.array("i")
self.cols = array.array("i")
self.data = array.array(type_flag)
def append(self, i, j, v):
m, n = self.shape
if i >= m or j >= n:
raise Exception("Index out of bounds")
self.rows.append(i)
self.cols.append(j)
self.data.append(v)
def tocoo(self):
rows = np.frombuffer(self.rows, dtype=np.int32)
cols = np.frombuffer(self.cols, dtype=np.int32)
data = np.frombuffer(self.data, dtype=self.dtype)
return sp.coo_matrix((data, (rows, cols)), shape=self.shape)
def __len__(self):
return len(self.data)
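# A minimal usage sketch of the incremental builder above: (row, col, value)
# triplets are accumulated in compact array.array buffers and only turned
# into a scipy COO matrix at the end. The values are illustrative.
def _example_incremental_coo():
    mat = _IncrementalCOOMatrix((3, 3), np.float32)
    mat.append(0, 1, 1.0)
    mat.append(2, 2, 0.5)
    return mat.tocoo().toarray()  # dense 3x3 array with two non-zero entries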
class _FeatureBuilder(object):
def __init__(
self, id_mapping, feature_mapping, identity_features, normalize, entity_type
):
self._id_mapping = id_mapping
self._feature_mapping = feature_mapping
self._identity_features = identity_features
self._normalize = normalize
self._entity_type = entity_type
def features_shape(self):
return len(self._id_mapping), len(self._feature_mapping)
def _iter_features(self, features):
if isinstance(features, dict):
for entry in features.items():
yield entry
else:
for feature_name in features:
yield (feature_name, 1.0)
def _process_features(self, datum):
if len(datum) != 2:
raise ValueError(
"Expected tuples of ({}_id, features), "
"got {}.".format(self._entity_type, datum)
)
entity_id, features = datum
if entity_id not in self._id_mapping:
raise ValueError(
"{entity_type} id {entity_id} not in {entity_type} id mappings.".format(
entity_type=self._entity_type, entity_id=entity_id
)
)
idx = self._id_mapping[entity_id]
for (feature, weight) in self._iter_features(features):
if feature not in self._feature_mapping:
raise ValueError(
"Feature {} not in feature mapping. "
"Call fit first.".format(feature)
)
feature_idx = self._feature_mapping[feature]
yield (idx, feature_idx, weight)
def build(self, data):
features = _IncrementalCOOMatrix(self.features_shape(), np.float32)
if self._identity_features:
for (_id, idx) in self._id_mapping.items():
features.append(idx, self._feature_mapping[_id], 1.0)
for datum in data:
for (entity_idx, feature_idx, weight) in self._process_features(datum):
features.append(entity_idx, feature_idx, weight)
features = features.tocoo().tocsr()
if self._normalize:
if np.any(features.getnnz(1) == 0):
raise ValueError(
"Cannot normalize feature matrix: some rows have zero norm. "
"Ensure that features were provided for all entries."
)
sklearn.preprocessing.normalize(features, norm="l1", copy=False)
return features
class Dataset(object):
"""
Tool for building interaction and feature matrices, taking care of the
mapping between user/item ids and feature names and internal feature indices.
To create a dataset:
- Create an instance of the `Dataset` class.
- Call `fit` (or `fit_partial`), supplying user/item ids and feature names
that you want to use in your model. This will create internal mappings that
translate the ids and feature names to internal indices used by the LightFM
model.
    - Call `build_interactions` with an iterable of (user id, item id) or (user id,
      item id, weight) tuples to build the interactions and weights matrices.
- Call `build_user/item_features` with iterables of (user/item id, [features])
or (user/item id, {feature: feature weight}) to build feature matrices.
- To add new user/item ids or features, call `fit_partial` again. You will need
to resize your LightFM model to be able to use the new features.
Parameters
----------
user_identity_features: bool, optional
Create a unique feature for every user in addition to other features.
If true (default), a latent vector will be allocated for every user. This
is a reasonable default for most applications, but should be set to false
if there is very little data for every user. For more details see the Notes
in :doc:`LightFM<lightfm>`.
item_identity_features: bool, optional
Create a unique feature for every item in addition to other features.
If true (default), a latent vector will be allocated for every item. This
is a reasonable default for most applications, but should be set to false
if there is very little data for every item. For more details see the Notes
in :doc:`LightFM<lightfm>`.
"""
def __init__(self, user_identity_features=True, item_identity_features=True):
self._user_identity_features = user_identity_features
self._item_identity_features = item_identity_features
self._user_id_mapping = {}
self._item_id_mapping = {}
self._user_feature_mapping = {}
self._item_feature_mapping = {}
def _check_fitted(self):
if not self._user_id_mapping or not self._item_id_mapping:
raise ValueError(
"You must call fit first to build the item and user " "id mappings."
)
def fit(self, users, items, user_features=None, item_features=None):
"""
Fit the user/item id and feature name mappings.
Calling fit the second time will reset existing mappings.
Parameters
----------
users: iterable of user ids
items: iterable of item ids
user_features: iterable of user features, optional
item_features: iterable of item features, optional
"""
self._user_id_mapping = {}
self._item_id_mapping = {}
self._user_feature_mapping = {}
self._item_feature_mapping = {}
return self.fit_partial(users, items, user_features, item_features)
def fit_partial(
self, users=None, items=None, user_features=None, item_features=None
):
"""
Fit the user/item id and feature name mappings.
        Unlike `fit`, calling this method repeatedly will add new entries to the
        existing mappings instead of resetting them.
Parameters
----------
users: iterable of user ids, optional
items: iterable of item ids, optional
user_features: iterable of user features, optional
item_features: iterable of item features, optional
"""
if users is not None:
for user_id in users:
self._user_id_mapping.setdefault(user_id, len(self._user_id_mapping))
if self._user_identity_features:
self._user_feature_mapping.setdefault(
user_id, len(self._user_feature_mapping)
)
if items is not None:
for item_id in items:
self._item_id_mapping.setdefault(item_id, len(self._item_id_mapping))
if self._item_identity_features:
self._item_feature_mapping.setdefault(
item_id, len(self._item_feature_mapping)
)
if user_features is not None:
for user_feature in user_features:
self._user_feature_mapping.setdefault(
user_feature, len(self._user_feature_mapping)
)
if item_features is not None:
for item_feature in item_features:
self._item_feature_mapping.setdefault(
item_feature, len(self._item_feature_mapping)
)
def _unpack_datum(self, datum):
if len(datum) == 3:
(user_id, item_id, weight) = datum
elif len(datum) == 2:
(user_id, item_id) = datum
weight = 1.0
else:
raise ValueError(
"Expecting tuples of (user_id, item_id, weight) "
"or (user_id, item_id). Got {}".format(datum)
)
user_idx = self._user_id_mapping.get(user_id)
item_idx = self._item_id_mapping.get(item_id)
if user_idx is None:
raise ValueError(
"User id {} not in user id mapping. Make sure "
"you call the fit method.".format(user_id)
)
if item_idx is None:
raise ValueError(
"Item id {} not in item id mapping. Make sure "
"you call the fit method.".format(item_id)
)
return (user_idx, item_idx, weight)
def interactions_shape(self):
"""
Return a tuple of (num users, num items).
"""
return (len(self._user_id_mapping), len(self._item_id_mapping))
def build_interactions(self, data):
"""
Build an interaction matrix.
Two matrices will be returned: a (num_users, num_items)
COO matrix with interactions, and a (num_users, num_items)
matrix with the corresponding interaction weights.
Parameters
----------
data: iterable of (user_id, item_id) or (user_id, item_id, weight)
An iterable of interactions. The user and item ids will be
translated to internal model indices using the mappings
constructed during the fit call. If weights are not provided
they will be assumed to be 1.0.
Returns
-------
(interactions, weights): COO matrix, COO matrix
Two COO matrices: the interactions matrix
and the corresponding weights matrix.
"""
interactions = _IncrementalCOOMatrix(self.interactions_shape(), np.int32)
weights = _IncrementalCOOMatrix(self.interactions_shape(), np.float32)
for datum in data:
user_idx, item_idx, weight = self._unpack_datum(datum)
interactions.append(user_idx, item_idx, 1)
weights.append(user_idx, item_idx, weight)
return (interactions.tocoo(), weights.tocoo())
def user_features_shape(self):
"""
Return the shape of the user features matrix.
Returns
-------
(num user ids, num user features): tuple of ints
The shape.
"""
return (len(self._user_id_mapping), len(self._user_feature_mapping))
def build_user_features(self, data, normalize=True):
"""
Build a user features matrix out of an iterable of the form
(user id, [list of feature names]) or (user id, {feature name: feature weight}).
Parameters
----------
data: iterable of the form
(user id, [list of feature names]) or (user id,
{feature name: feature weight}).
User and feature ids will be translated to internal indices
constructed during the fit call.
normalize: bool, optional
If true, will ensure that feature weights sum to 1 in every row.
Returns
-------
feature matrix: CSR matrix (num users, num features)
Matrix of user features.
"""
builder = _FeatureBuilder(
self._user_id_mapping,
self._user_feature_mapping,
self._user_identity_features,
normalize,
"user",
)
return builder.build(data)
def item_features_shape(self):
"""
Return the shape of the item features matrix.
Returns
-------
(num item ids, num item features): tuple of ints
The shape.
"""
return (len(self._item_id_mapping), len(self._item_feature_mapping))
def build_item_features(self, data, normalize=True):
"""
        Build an item features matrix out of an iterable of the form
(item id, [list of feature names]) or (item id, {feature name: feature weight}).
Parameters
----------
data: iterable of the form
(item id, [list of feature names]) or (item id,
{feature name: feature weight}).
Item and feature ids will be translated to internal indices
constructed during the fit call.
normalize: bool, optional
If true, will ensure that feature weights sum to 1 in every row.
Returns
-------
feature matrix: CSR matrix (num items, num features)
Matrix of item features.
"""
builder = _FeatureBuilder(
self._item_id_mapping,
self._item_feature_mapping,
self._item_identity_features,
normalize,
"item",
)
return builder.build(data)
def model_dimensions(self):
"""
Returns a tuple that characterizes the number of user/item feature
embeddings in a LightFM model for this dataset.
"""
return (len(self._user_feature_mapping), len(self._item_feature_mapping))
def mapping(self):
"""
Return the constructed mappings.
Invert these to map internal indices to external ids.
Returns
-------
(user id map, user feature map, item id map, item feature map): tuple of dictionaries
"""
return (
self._user_id_mapping,
self._user_feature_mapping,
self._item_id_mapping,
self._item_feature_mapping,
)
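# Editorial sketch (not part of the original module): a minimal, hedged
# illustration of the workflow described in the Dataset docstring
# (fit -> build_interactions -> build_user_features). The ids, feature names
# and weights below are invented for the example; the function is defined but
# never called here.
def _example_dataset_usage():
    dataset = Dataset()
    # Register all known ids and feature names first.
    dataset.fit(
        users=["u1", "u2"],
        items=["i1", "i2", "i3"],
        user_features=["age:young", "age:old"],
    )
    # Interactions may be (user, item) pairs or (user, item, weight) triples.
    interactions, weights = dataset.build_interactions(
        [("u1", "i1"), ("u1", "i3", 2.0), ("u2", "i2")]
    )
    # Per-user features as a list of names or a {name: weight} dict.
    user_features = dataset.build_user_features(
        [("u1", ["age:young"]), ("u2", {"age:old": 1.0})]
    )
    return interactions, weights, user_features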
| apache-2.0 |
bbfrederick/rapidtide | rapidtide/tests/test_simulate.py | 1 | 3306 | #!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2017-2021 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import rapidtide.miscmath as tide_math
import rapidtide.resample as tide_res
from rapidtide.tests.utils import mse
def test_simulate(display=False):
fmritr = 1.5
numtrs = 260
fmriskip = 0
oversampfac = 10
inputfreq = oversampfac / fmritr
inputstarttime = 0.0
timecourse = np.zeros((oversampfac * numtrs), dtype="float")
timecourse[500:600] = 1.0
timecourse[700:750] = 1.0
# read in the timecourse to resample
inputvec = tide_math.stdnormalize(timecourse)
simregressorpts = len(inputvec)
# prepare the input data for interpolation
print("Input regressor has ", simregressorpts, " points")
inputstep = 1.0 / inputfreq
nirs_x = np.r_[0.0 : 1.0 * simregressorpts] * inputstep - inputstarttime
nirs_y = inputvec[0:simregressorpts]
print("nirs regressor runs from ", nirs_x[0], " to ", nirs_x[-1])
# prepare the output timepoints
fmrifreq = 1.0 / fmritr
initial_fmri_x = np.r_[0 : fmritr * (numtrs - fmriskip) : fmritr] + fmritr * fmriskip
print("length of fmri after removing skip:", len(initial_fmri_x))
print("fmri time runs from ", initial_fmri_x[0], " to ", initial_fmri_x[-1])
# set the sim parameters
immean = 1.0
boldpc = 1.0
lag = 10.0 * fmritr
noiselevel = 0.0
simdata = np.zeros((len(initial_fmri_x)), dtype="float")
fmrilcut = 0.0
fmriucut = fmrifreq / 2.0
# set up fast resampling
padtime = 60.0
numpadtrs = int(padtime / fmritr)
padtime = fmritr * numpadtrs
genlagtc = tide_res.FastResampler(nirs_x, nirs_y, padtime=padtime, doplot=False)
initial_fmri_y = genlagtc.yfromx(initial_fmri_x)
if display:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title("Regressors")
plt.plot(nirs_x, nirs_y, initial_fmri_x, initial_fmri_y)
plt.show()
# loop over space
sliceoffsettime = 0.0
fmri_x = initial_fmri_x - lag - sliceoffsettime
print(fmri_x[0], initial_fmri_x[0], lag, sliceoffsettime)
fmri_y = genlagtc.yfromx(fmri_x)
thenoise = noiselevel * np.random.standard_normal(len(fmri_y))
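    # simulated fMRI signal: a baseline of immean modulated by boldpc percent of
    # the lagged regressor, plus Gaussian noise (zero amplitude here)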
simdata[:] = immean * (1.0 + (boldpc / 100.0) * fmri_y) + thenoise
if display:
plt.plot(initial_fmri_x, simdata, initial_fmri_x, initial_fmri_y)
plt.show()
# tests
msethresh = 1e-6
aethresh = 2
assert mse(simdata, initial_fmri_y) < aethresh
# np.testing.assert_almost_equal(simdata, initial_fmri_y)
def main():
test_simulate(display=True)
if __name__ == "__main__":
mpl.use("TkAgg")
main()
| apache-2.0 |
hitszxp/scikit-learn | sklearn/feature_selection/rfe.py | 3 | 14090 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import _check_cv as check_cv
from ..cross_validation import _safe_split, _score
from .base import SelectorMixin
from ..metrics.scorer import check_scoring
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches when an `RFE` object is passed as an
argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]` corresponds to the \
ranking position of the i-th feature. Selected (i.e., estimated \
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative
    features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params={}, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
if estimator.coef_.ndim > 1:
ranks = np.argsort(safe_sqr(estimator.coef_).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(estimator.coef_))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, support_], y)
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches when an `RFE` object is passed as an
argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
`grid_scores_[i]` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative features
    (which are not known a priori) in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params={}, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
rfe = RFE(estimator=self.estimator, n_features_to_select=1,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
scores = np.zeros(X.shape[1])
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
# Compute a full ranking of the features
ranking_ = rfe.fit(X_train, y_train).ranking_
# Score each subset of features
for k in range(0, max(ranking_)):
mask = np.where(ranking_ <= k + 1)[0]
estimator = clone(self.estimator)
estimator.fit(X_train[:, mask], y_train)
score = _score(estimator, X_test[:, mask], y_test, scorer)
if self.verbose > 0:
print("Finished fold with %d / %d feature ranks, score=%f"
% (k + 1, max(ranking_), score))
scores[k] += score
# Pick the best number of features on average
k = np.argmax(scores)
best_score = scores[k]
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=k+1,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to len(cv) - 1
# here, the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
| bsd-3-clause |
bobisme/odoo | addons/resource/faces/timescale.py | 170 | 3902 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# [email protected]
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import faces.pcalendar as pcal
import matplotlib.cbook as cbook
import datetime
import sys
class TimeScale(object):
def __init__(self, calendar):
self.data_calendar = calendar
self._create_chart_calendar()
self.now = self.to_num(self.data_calendar.now)
def to_datetime(self, xval):
return xval.to_datetime()
def to_num(self, date):
return self.chart_calendar.WorkingDate(date)
def is_free_slot(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1 != dt2
def is_free_day(self, value):
dt1 = self.chart_calendar.to_starttime(value)
dt2 = self.data_calendar.to_starttime\
(self.data_calendar.from_datetime(dt1))
return dt1.date() != dt2.date()
def _create_chart_calendar(self):
dcal = self.data_calendar
ccal = self.chart_calendar = pcal.Calendar()
ccal.minimum_time_unit = 1
#pad worktime slots of calendar (all days should be equally long)
slot_sum = lambda slots: sum(map(lambda slot: slot[1] - slot[0], slots))
day_sum = lambda day: slot_sum(dcal.get_working_times(day))
max_work_time = max(map(day_sum, range(7)))
        #working time should take up 2/3 of the total chart day
sum_time = 3 * max_work_time / 2
#now create timeslots for ccal
def create_time_slots(day):
src_slots = dcal.get_working_times(day)
slots = [0, src_slots, 24*60]
slots = tuple(cbook.flatten(slots))
slots = zip(slots[:-1], slots[1:])
#balance non working slots
work_time = slot_sum(src_slots)
non_work_time = sum_time - work_time
non_slots = filter(lambda s: s not in src_slots, slots)
non_slots = map(lambda s: (s[1] - s[0], s), non_slots)
non_slots.sort()
slots = []
i = 0
for l, s in non_slots:
delta = non_work_time / (len(non_slots) - i)
delta = min(l, delta)
non_work_time -= delta
slots.append((s[0], s[0] + delta))
i += 1
slots.extend(src_slots)
slots.sort()
return slots
min_delta = sys.maxint
for i in range(7):
slots = create_time_slots(i)
ccal.working_times[i] = slots
min_delta = min(min_delta, min(map(lambda s: s[1] - s[0], slots)))
ccal._recalc_working_time()
self.slot_delta = min_delta
self.day_delta = sum_time
self.week_delta = ccal.week_time
_default_scale = TimeScale(pcal._default_calendar)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
guildai/guild | examples/pipeline/prepare.py | 1 | 1846 | # License: BSD
# Author: Sasank Chilamkurthy
# Ref: https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
from __future__ import print_function, division
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import torch
import torchvision
from torchvision import datasets, transforms
from util import imshow
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
),
'val': transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
),
}
data_dir = 'data/hymenoptera_data'
image_datasets = {
x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
for x in ['train', 'val']
}
dataloaders = {
x: torch.utils.data.DataLoader(
image_datasets[x], batch_size=4, shuffle=True, num_workers=4
)
for x in ['train', 'val']
}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
# Get a batch of training data
inputs, classes = next(iter(dataloaders['train']))
# Make a grid from batch
out = torchvision.utils.make_grid(inputs)
print("Saving sample images prepare-samples.png")
imshow(out, title=[class_names[x] for x in classes])
plt.savefig("prepare-samples.png")
for name, dataloader in dataloaders.items():
print("Saving data %s.pth" % name)
torch.save(dataloader, '%s.pth' % name)
| apache-2.0 |
googleinterns/betel | betel/app_page_scraper.py | 1 | 5385 | import pathlib
import urllib.error
import urllib.request
import logging
import bs4
import parmap
import pandas as pd
from betel import utils
from betel import info_files_helpers
from betel import betel_errors
class PlayAppPageScraper:
"""A class for scraping the icons and categories from Google Play Store
apps' web pages."""
_ICON_CLASS = "T75of sHb2Xb" # icon's tag's class
_APP_CATEGORY_ITEMPROP = "genre" # app's category's tag's itemprop
def __init__(self, base_url: str, storage_dir: pathlib.Path, category_filter: [str] = None):
"""Constructor.
:param base_url: base url of the apps store.
:param storage_dir: main storage directory for retrieved info.
:param category_filter: a list of categories whose apps are stored
(instead of the whole input)
"""
self._base_url = base_url
self._storage_dir = storage_dir
self._storage_dir.mkdir(exist_ok=True, parents=True)
self._info_file = storage_dir / utils.SCRAPER_INFO_FILE_NAME
self._log_file = storage_dir / utils.SCRAPER_LOG_FILE_NAME
logging.basicConfig(filename=self._log_file, filemode="a+")
self._category_filter = category_filter
def _build_app_page_url(self, app_id: str) -> str:
return self._base_url + "/details?id=" + app_id
def _get_app_page(self, app_id: str) -> bs4.BeautifulSoup:
url = self._build_app_page_url(app_id)
return _get_html(url)
def get_app_icon(self, app_id: str, subdir: pathlib.Path = "") -> None:
"""Scrapes the app icon URL from the app's Play Store details page,
downloads the corresponding app icon and saves it to
_storage_dir / subdir / icon_{app_id}.
:param app_id: the id of the app.
:param subdir: icon storage subdirectory inside _storage_dir base
directory.
"""
html = self._get_app_page(app_id)
src = self._scrape_icon_url(html)
self._download_icon(app_id, src, subdir)
def _scrape_icon_url(self, html: bs4.BeautifulSoup) -> str:
icon = html.find(class_=self._ICON_CLASS)
if icon is None:
raise betel_errors.PlayScrapingError("Icon class not found in html.")
return icon["src"]
def _download_icon(self, app_id: str, source: str, directory: pathlib.Path) -> None:
location = self._storage_dir / directory
location.mkdir(exist_ok=True, parents=True)
try:
urllib.request.urlretrieve(source, location / utils.get_app_icon_name(app_id))
except (urllib.error.HTTPError, urllib.error.URLError) as exception:
raise betel_errors.AccessError("Can not retrieve icon.", exception)
def get_app_category(self, app_id: str) -> str:
"""Scrapes the app category from the app's Play Store details page.
:param app_id: the id of the app.
:return: the category of the app in str format
"""
html = self._get_app_page(app_id)
return self._scrape_category(html).lower()
def _scrape_category(self, html: bs4.BeautifulSoup) -> str:
category = html.find(itemprop=self._APP_CATEGORY_ITEMPROP)
if category is None:
raise betel_errors.PlayScrapingError("Category itemprop not found in html.")
return category.get_text()
def store_app_info(self, app_id: str) -> None:
"""Adds an app to the data set by retrieving all the info
needed and appending it to the list of apps (kept in _info_file).
The app is only stored in the case that its category is in the
_category_filter list.
:param app_id: the id of the app.
"""
search_data_frame = utils.get_app_search_data_frame(app_id)
part_of_data_set = (
info_files_helpers.part_of_data_set(self._info_file, search_data_frame)
)
try:
if not part_of_data_set:
category = self.get_app_category(app_id)
if self._category_filter is None or category in self._category_filter:
self.get_app_icon(app_id)
self._write_app_info(app_id, category)
except betel_errors.BetelError as exception:
info = f"{app_id}, {getattr(exception, 'message', repr(exception))}"
logging.warning(info)
def _write_app_info(self, app_id: str, category: str) -> None:
app_info = _build_app_info_data_frame(app_id, category)
info_files_helpers.add_to_data(self._info_file, app_info)
def store_apps_info(self, app_ids: [str]) -> None:
"""Adds the specified apps to the data set by retrieving all the info
needed and appending them to the list of apps (kept in _info_file).
:param app_ids: array of app ids.
"""
app_ids = set(app_ids)
parmap.map(self.store_app_info, app_ids)
def _get_html(url: str) -> bs4.BeautifulSoup:
try:
page = urllib.request.urlopen(url)
soup = bs4.BeautifulSoup(page, 'html.parser')
return soup
except (urllib.error.HTTPError, urllib.error.URLError) as exception:
raise betel_errors.AccessError("Can not open URL.", exception)
def _build_app_info_data_frame(app_id: str, category: str) -> pd.DataFrame:
dictionary = {"app_id": app_id, "category": category}
return pd.DataFrame([dictionary])
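# Editorial sketch (not part of the original module): a hedged usage example.
# The base URL, storage path, app ids and category filter are assumptions made
# for illustration; store_apps_info scrapes each app's category and icon and
# appends the results to the scraper's info file. The function is never called.
def _example_scraper_usage():
    scraper = PlayAppPageScraper(
        base_url="https://play.google.com/store/apps",
        storage_dir=pathlib.Path("scraped_apps"),
        category_filter=["tools", "productivity"],
    )
    scraper.store_apps_info(["com.example.alpha", "com.example.beta"])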
| apache-2.0 |
Hezi-Resheff/location-based-behav | loc-vs-acc/location/demo.py | 1 | 1849 | """ The map trajectory demo """
import pandas as pd
import matplotlib.pyplot as plt
from location import *
from settings import *
from plots import *
"""
out = copmute_plot_fpt_std( "Storks_Africa__10_to_12_2012__with_behav.csv", hard_max=5)
out.to_csv(os.path.join(DATA_ROOT, "out", "fpt-r-var__hard_max_5.csv"))
exit(0)
"""
path = os.path.join(DATA_ROOT, "Storks_Africa__10_to_12_2012__with_behav.csv")
# Load
animal_data = pd.DataFrame.from_csv(path, header=None, parse_dates=[2])
animal_data.columns = ["bird_id", "date", "time", "gps_lat", "gps_long", "behav", "ODBA"]
animal_data = animal_data.loc[animal_data.bird_id == 2334]
animal_data = trajectory_processor(animal_data, stamp=True).compute_first_passage(1).cluster("FPT_1", k=3)
#animal_data.find_best_fpt()
params = {
'projection':'merc',
'lat_0':animal_data.gps_lat.mean(),
'lon_0':animal_data.gps_long.mean(),
'resolution':'h',
'area_thresh':0.1,
'llcrnrlon':animal_data.gps_long.min()-10,
'llcrnrlat':animal_data.gps_lat.min()-10,
'urcrnrlon':animal_data.gps_long.max()+10,
'urcrnrlat':animal_data.gps_lat.max()+10
}
#(compute_steps(animal_data)["speed"].copy()+1).apply(np.log10).hist(bins=25)
#plt.show()
plt.figure()
"""
map = MyBasemap(**params)
map.drawcoastlines()
map.fillcontinents(color = 'coral')
map.drawmapboundary()
map.drawcountries()
map.printcountries()
"""
# cluster
#clst = trajectory_cluster_1(compute_steps(animal_data), "speed")["cluster"].values
colors = list("rbgy")
# plot
x, y = animal_data.gps_long.values, animal_data.gps_lat.values
map = plt
map.plot(x,y, "ok", markersize=5)
for i in range(len(x)-2):
c = animal_data.ix[i, "cluster"]
if not np.isnan(c):
map.plot([x[i], x[i+1]], [y[i], y[i+1]], color=colors[c])
plt.show()
| mit |
mantidproject/mantid | qt/python/mantidqt/widgets/sliceviewer/imageinfowidget.py | 3 | 3090 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
# std imports
from typing import Union
import sys
import numpy as np
from mantidqt.utils.qt import import_qt
from matplotlib.collections import QuadMesh
from matplotlib.image import AxesImage
from .lineplots import CursorTracker, cursor_info
from .transform import NonOrthogonalTransform
# Constants
DBLMAX = sys.float_info.max
ImageInfoWidget = import_qt('.._common', 'mantidqt.widgets', 'ImageInfoWidget')
class ImageInfoTracker(CursorTracker):
def __init__(self, image: Union[AxesImage, QuadMesh], transform: NonOrthogonalTransform,
do_transform: bool, widget: ImageInfoWidget):
"""
        Track cursor movements over the given image and update the widget's table.
        :param image: An AxesImage or QuadMesh instance to track
        :param transform: NonOrthogonalTransform whose inverse maps cursor
            coordinates back to data coordinates
        :param do_transform: If True, apply the inverse transform before
            updating the table
        :param widget: The ImageInfoWidget to update
"""
super().__init__(image_axes=image.axes, autoconnect=False)
self._image = image
self.transform = transform
self.do_transform = do_transform
self._widget = widget
if hasattr(image, 'get_extent'):
self.on_cursor_at = self._on_cursor_at_axesimage
else:
self.on_cursor_at = self._on_cursor_at_mesh
def on_cursor_outside_axes(self):
"""Update the image table given the mouse has moved out of the image axes"""
self._widget.cursorAt(DBLMAX, DBLMAX, DBLMAX)
# private api
def _on_cursor_at_axesimage(self, xdata: float, ydata: float):
"""
Update the image table for the given coordinates given an AxesImage
object.
:param xdata: X coordinate of cursor in data space
:param ydata: Y coordinate of cursor in data space
"""
if self._image is None:
return
cinfo = cursor_info(self._image, xdata, ydata)
if cinfo is not None:
arr, _, (i, j) = cinfo
if (0 <= i < arr.shape[0]) and (0 <= j < arr.shape[1]) and not np.ma.is_masked(arr[i, j]):
self._widget.cursorAt(xdata, ydata, arr[i, j])
def _on_cursor_at_mesh(self, xdata: float, ydata: float):
"""
Update the image table for the given coordinates for a mesh object.
This simply updates the position coordinates in the same fashion as
the standard matplotlib system as looking up the signal is not yet
supported.
:param xdata: X coordinate of cursor in data space
:param ydata: Y coordinate of cursor in data space
"""
if self._image is None:
return
if self.do_transform:
xdata, ydata = self.transform.inv_tr(xdata, ydata)
self._widget.cursorAt(xdata, ydata, DBLMAX)
| gpl-3.0 |
kakaba2009/MachineLearning | python/src/lstm/lstmTensorFlow.py | 1 | 4570 | from __future__ import print_function
import os
import time
import math
import warnings
import numpy as np
import tensorflow as tf
import tensorlayer as tl
import src.mylib.mlstm as mlstm
import src.mylib.mcalc as mcalc
import matplotlib.pyplot as plt
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # Hide messy TensorFlow warnings
warnings.filterwarnings("ignore") # Hide messy Numpy warnings
tf.set_random_seed(0) # fix random seed
# Hyper Parameters
EPOCHSIZE = 25 # rnn epoch size
BATCHSIZE = 10 # rnn batch size
TIME_STEP = 5 # rnn time step
CELLSIZE = 32 # rnn cell size
FEATURES = 1 # rnn input size
DROPOUTS = 0.5
max_learning_rate = 0.001
min_learning_rate = 0.00001
decay_speed = 1000.0
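# the learning rate decays exponentially from max_learning_rate towards
# min_learning_rate with a time constant of decay_speed training steps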
ds = mlstm.loadFXData('JPY=X', '../db/forex.db', 1000)
ds = ds[['Close']].values
S, T = mcalc.split_x_y(ds)
total = S.shape[0]
def mshape(X):
# reshape input to be [samples, time steps, features]
return np.reshape(X, (-1, TIME_STEP, FEATURES))
# tensorflow placeholders
tf_x = tf.placeholder(tf.float32, [None, TIME_STEP, FEATURES]) # shape(batch, steps, feature)
tf_y = tf.placeholder(tf.float32, [None, TIME_STEP, FEATURES]) # input y
# variable learning rate
lr = tf.placeholder(tf.float32)
# RNN
rnn_cell = tf.contrib.rnn.BasicLSTMCell(num_units=CELLSIZE, forget_bias=1.0, state_is_tuple=True)
rnn_cell = tf.contrib.rnn.DropoutWrapper(rnn_cell, input_keep_prob=DROPOUTS)
init_s = rnn_cell.zero_state(batch_size=BATCHSIZE, dtype=tf.float32) # very first hidden state
outputs, final_s = tf.nn.dynamic_rnn(
rnn_cell, # cell you have chosen
tf_x, # input
initial_state=init_s, # the initial hidden state
time_major=False, # False: (batch, time step, input); True: (time step, batch, input)
)
outs2D = tf.reshape(outputs, [-1, CELLSIZE]) # reshape 3D output to 2D for fully connected layer
net_outs2D = tf.layers.dense(outs2D, FEATURES)
outs = tf.reshape(net_outs2D, [-1, TIME_STEP, FEATURES]) # reshape back to 3D
loss = tf.losses.mean_squared_error(labels=tf_y, predictions=outs) # compute cost
tf.summary.scalar("loss", loss)
train_op = tf.train.AdamOptimizer(lr).minimize(loss)
# Init for saving models. They will be saved into a directory named 'checkpoints'.
# Only the last checkpoint is kept.
if not os.path.exists("checkpoints"):
os.mkdir("checkpoints")
saver = tf.train.Saver(max_to_keep=1)
merged = tf.summary.merge_all()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer()) # initialize var in graph
saver.restore(sess, "checkpoints/lstmTensorFlow.ckpt")
print("lstmTensorFlow.ckpt restored")
summary_writer = tf.summary.FileWriter("../log/", sess.graph)
#plt.figure(1, figsize=(12, 5))
#plt.ion()
steps = 0
for epoch in range(EPOCHSIZE):
for x, y in tl.iterate.seq_minibatches(inputs=S, targets=T, batch_size=BATCHSIZE, seq_length=TIME_STEP, stride=1):
steps += 1
x = mshape(x) # shape (batch, time_step, input_size)
y = mshape(y) # shape (batch, time_step, input_size)
print(steps)
#print("x=", x.flatten()) # shape (batch*time_step, input_size)
#print("y=", y.flatten()) # shape (batch*time_step, input_size)
# learning rate decay
learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * math.exp(-steps / decay_speed)
if 'final_s_' not in globals(): # first state, no any hidden state
feed_dict = {tf_x: x, tf_y: y, lr: learning_rate}
else: # has hidden state, so pass it to rnn
feed_dict = {tf_x: x, tf_y: y, lr: learning_rate, init_s: final_s_}
_, pred_, final_s_ = sess.run([train_op, outs, final_s], feed_dict) # train
if steps % 2 == 0:
# Calculate batch loss
bloss, summary = sess.run([loss, merged], feed_dict={tf_x: x, tf_y: y, lr: learning_rate})
print("Iter " + str(steps) + ", Minibatch Loss= " + "{:.6f}".format(bloss))
summary_writer.add_summary(summary, steps)
# plotting
#plt.plot(y.flatten(), 'r-')
#plt.plot(pred_.flatten(), 'b-')
#plt.draw()
#plt.pause(0.05)
save_path = saver.save(sess, "checkpoints/lstmTensorFlow.ckpt")
print("checkpoints saved:", save_path)
#plt.ioff()
#plt.show()
| apache-2.0 |
mne-tools/mne-tools.github.io | 0.17/_downloads/2098b3396fccfb6876e982723303bf93/plot_resample.py | 7 | 3448 | """
===============
Resampling data
===============
When performing experiments where timing is critical, a signal with a high
sampling rate is desired. However, having a signal with a much higher sampling
rate than is necessary needlessly consumes memory and slows down computations
operating on the data.
This example downsamples from 600 Hz to 100 Hz. This achieves a 6-fold
reduction in data size, at the cost of an equal loss of temporal resolution.
"""
# Authors: Marijn van Vliet <[email protected]>
#
# License: BSD (3-clause)
from matplotlib import pyplot as plt
import mne
from mne.datasets import sample
###############################################################################
# Setting up data paths and loading raw data (skip some data for speed)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
raw = mne.io.read_raw_fif(raw_fname).crop(120, 240).load_data()
###############################################################################
# Since downsampling reduces the timing precision of events, we recommend
# first extracting epochs and downsampling the Epochs object:
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=2, tmin=-0.1, tmax=0.8, preload=True)
# Downsample to 100 Hz
print('Original sampling rate:', epochs.info['sfreq'], 'Hz')
epochs_resampled = epochs.copy().resample(100, npad='auto')
print('New sampling rate:', epochs_resampled.info['sfreq'], 'Hz')
# Plot a piece of data to see the effects of downsampling
plt.figure(figsize=(7, 3))
n_samples_to_plot = int(0.5 * epochs.info['sfreq']) # plot 0.5 seconds of data
plt.plot(epochs.times[:n_samples_to_plot],
epochs.get_data()[0, 0, :n_samples_to_plot], color='black')
n_samples_to_plot = int(0.5 * epochs_resampled.info['sfreq'])
plt.plot(epochs_resampled.times[:n_samples_to_plot],
epochs_resampled.get_data()[0, 0, :n_samples_to_plot],
'-o', color='red')
plt.xlabel('time (s)')
plt.legend(['original', 'downsampled'], loc='best')
plt.title('Effect of downsampling')
mne.viz.tight_layout()
###############################################################################
# When resampling epochs is unwanted or impossible, for example when the data
# doesn't fit into memory or your analysis pipeline doesn't involve epochs at
# all, the alternative approach is to resample the continuous data. This
# can only be done on loaded or pre-loaded data.
# Resample to 300 Hz
raw_resampled = raw.copy().resample(300, npad='auto')
###############################################################################
# Because resampling also affects the stim channels, some trigger onsets might
# be lost in this case. While MNE attempts to downsample the stim channels in
# an intelligent manner to avoid this, the recommended approach is to find
# events on the original data before downsampling.
print('Number of events before resampling:', len(mne.find_events(raw)))
# Resample to 100 Hz (suppress the warning that would be emitted)
raw_resampled = raw.copy().resample(100, npad='auto', verbose='error')
print('Number of events after resampling:',
len(mne.find_events(raw_resampled)))
# To avoid losing events, jointly resample the data and event matrix
events = mne.find_events(raw)
raw_resampled, events_resampled = raw.copy().resample(
100, npad='auto', events=events)
print('Number of events after resampling:', len(events_resampled))
| bsd-3-clause |
BiaDarkia/scikit-learn | examples/gaussian_process/plot_gpr_noisy_targets.py | 12 | 3688 | """
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression example computed in two different ways:
1. A noise-free case
2. A noisy case with known noise-level per datapoint
In both cases, the kernel's parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``alpha`` is applied as a Tikhonov
regularization of the assumed covariance between the training points.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
#         Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
# ----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
# ----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Instantiate a Gaussian Process model
gp = GaussianProcessRegressor(kernel=kernel, alpha=dy ** 2,
n_restarts_optimizer=10)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
lionelBytes/CSERF | stacked_bar_graph.py | 1 | 12103 | #!/usr/bin/env python
###############################################################################
# #
# stackedBarGraph.py - code for creating purdy stacked bar graphs #
# #
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = "Michael Imelfort"
__copyright__ = "Copyright 2014"
__credits__ = ["Michael Imelfort"]
__license__ = "GPL3"
__version__ = "0.0.1"
__maintainer__ = "Michael Imelfort"
__email__ = "[email protected]"
__status__ = "Development"
###############################################################################
import numpy as np
from matplotlib import pyplot as plt
###############################################################################
class StackedBarGrapher:
"""Container class"""
def __init__(self): pass
def demo(self):
d = np.array([[101.,0.,0.,0.,0.,0.,0.],
[92.,3.,0.,4.,5.,6.,0.],
[56.,7.,8.,9.,23.,4.,5.],
[81.,2.,4.,5.,32.,33.,4.],
[0.,45.,2.,3.,45.,67.,8.],
[99.,5.,0.,0.,0.,43.,56.]])
d_heights = [1.,2.,3.,4.,5.,6.]
d_widths = [.5,1.,3.,2.,1.,2.]
d_labels = ["fred","julie","sam","peter","rob","baz"]
d_colors = ['#2166ac', '#fee090', '#fdbb84', '#fc8d59', '#e34a33', '#b30000', '#777777']
gap = 0.05
fig = plt.figure()
ax1 = fig.add_subplot(321)
self.stackedBarPlot(ax1,
d,
d_colors,
edgeCols=['#000000']*7,
xLabels=d_labels,
)
plt.title("Straight up stacked bars")
ax2 = fig.add_subplot(322)
self.stackedBarPlot(ax2,
d,
d_colors,
edgeCols=['#000000']*7,
xLabels=d_labels,
scale=True
)
plt.title("Scaled bars")
ax3 = fig.add_subplot(323)
self.stackedBarPlot(ax3,
d,
d_colors,
edgeCols=['#000000']*7,
xLabels=d_labels,
heights=d_heights,
yTicks=7,
)
plt.title("Bars with set heights")
ax4 = fig.add_subplot(324)
self.stackedBarPlot(ax4,
d,
d_colors,
edgeCols=['#000000']*7,
xLabels=d_labels,
yTicks=7,
widths=d_widths,
scale=True
)
plt.title("Scaled bars with set widths")
ax5 = fig.add_subplot(325)
self.stackedBarPlot(ax5,
d,
d_colors,
edgeCols=['#000000']*7,
xLabels=d_labels,
gap=gap
)
plt.title("Straight up stacked bars + gaps")
ax6 = fig.add_subplot(326)
self.stackedBarPlot(ax6,
d,
d_colors,
edgeCols=['#000000']*7,
xLabels=d_labels,
scale=True,
gap=gap,
endGaps=True
)
plt.title("Scaled bars + gaps + end gaps")
        # adjust the figure layout so the rotated x tick labels fit
fig.subplots_adjust(bottom=0.4)
plt.tight_layout()
plt.show()
plt.close(fig)
del fig
def stackedBarPlot(self,
ax, # axes to plot onto
data, # data to plot
cols, # colors for each level
xLabels = None, # bar specific labels
yTicks = 6., # information used for making y ticks ["none", <int> or [[tick_pos1, tick_pos2, ... ],[tick_label_1, tick_label2, ...]]
edgeCols=None, # colors for edges
showFirst=-1, # only plot the first <showFirst> bars
scale=False, # scale bars to same height
widths=None, # set widths for each bar
heights=None, # set heights for each bar
ylabel='', # label for x axis
xlabel='', # label for y axis
gap=0., # gap between bars
endGaps=False, # allow gaps at end of bar chart (only used if gaps != 0.)
seriesLabels=None
):
#------------------------------------------------------------------------------
# data fixeratering
# make sure this makes sense
if showFirst != -1:
showFirst = np.min([showFirst, np.shape(data)[0]])
data_copy = np.copy(data[:showFirst]).transpose().astype('float')
data_shape = np.shape(data_copy)
if heights is not None:
heights = heights[:showFirst]
if widths is not None:
widths = widths[:showFirst]
showFirst = -1
else:
data_copy = np.copy(data).transpose()
data_shape = np.shape(data_copy)
# determine the number of bars and corresponding levels from the shape of the data
num_bars = data_shape[1]
levels = data_shape[0]
if widths is None:
widths = np.array([1] * num_bars)
x = np.arange(num_bars)
else:
x = [0]
for i in range(1, len(widths)):
x.append(x[i-1] + (widths[i-1] + widths[i])/2)
# stack the data --
# replace the value in each level by the cumulative sum of all preceding levels
data_stack = np.reshape([float(i) for i in np.ravel(np.cumsum(data_copy, axis=0))], data_shape)
# scale the data is needed
if scale:
data_copy /= data_stack[levels-1]
data_stack /= data_stack[levels-1]
if heights is not None:
                print("WARNING: setting scale and heights does not make sense.")
heights = None
elif heights is not None:
data_copy /= data_stack[levels-1]
data_stack /= data_stack[levels-1]
for i in np.arange(num_bars):
data_copy[:,i] *= heights[i]
data_stack[:,i] *= heights[i]
#------------------------------------------------------------------------------
# ticks
        if yTicks != "none":
# it is either a set of ticks or the number of auto ticks to make
real_ticks = True
try:
k = len(yTicks[1])
except:
real_ticks = False
if not real_ticks:
yTicks = float(yTicks)
if scale:
# make the ticks line up to 100 %
y_ticks_at = np.arange(yTicks)/(yTicks-1)
y_tick_labels = np.array(["%0.2f"%(i * 100) for i in y_ticks_at])
else:
# space the ticks along the y axis
y_ticks_at = np.arange(yTicks)/(yTicks-1)*np.max(data_stack)
y_tick_labels = np.array([str(i) for i in y_ticks_at])
yTicks=(y_ticks_at, y_tick_labels)
#------------------------------------------------------------------------------
# plot
if edgeCols is None:
edgeCols = ["none"]*len(cols)
        # take care of gaps
gapd_widths = [i - gap for i in widths]
# bars
ax.bar(x,
data_stack[0],
color=cols[0],
edgecolor=edgeCols[0],
width=gapd_widths,
linewidth=0.5,
align='center',
label = seriesLabels[0]
)
for i in np.arange(1,levels):
ax.bar(x,
data_copy[i],
bottom=data_stack[i-1],
color=cols[i],
edgecolor=edgeCols[i],
width=gapd_widths,
linewidth=0.5,
align='center',
label = seriesLabels[i]
)
# borders
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
# make ticks if necessary
        if yTicks != "none":
ax.tick_params(axis='y', which='both', labelsize=8, direction="out")
ax.yaxis.tick_left()
plt.yticks(yTicks[0], yTicks[1])
else:
plt.yticks([], [])
if xLabels is not None:
ax.tick_params(axis='x', which='both', labelsize=8, direction="out")
ax.xaxis.tick_bottom()
plt.xticks(x, xLabels, rotation='vertical')
else:
plt.xticks([], [])
# limits
if endGaps:
ax.set_xlim(-1.*widths[0]/2. - gap/2., np.sum(widths)-widths[0]/2. + gap/2.)
else:
ax.set_xlim(-1.*widths[0]/2. + gap/2., np.sum(widths)-widths[0]/2. - gap/2.)
ax.set_ylim(0, yTicks[0][-1])#np.max(data_stack))
# labels
if xlabel != '':
plt.xlabel(xlabel)
if ylabel != '':
plt.ylabel(ylabel)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
if __name__ == '__main__':
SBG = StackedBarGrapher()
SBG.demo()
###############################################################################
###############################################################################
###############################################################################
############################################################################### | gpl-2.0 |
mattphysics/PNRESD | FIGURES/Figure_6/Figure_6a.py | 1 | 2734 | # Generate Figure 6a of Van Zalinge et al. (2017), On determining the point of no return in climate change, Earth System Dynamics.
import numpy as N
import matplotlib.pyplot as plt
import matplotlib.colors
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = 2.
#########################
#THE POINT OF NO RETURNS#
#########################
PofnoR45=2055
PofnoR60=2057
PofnoR85=2047
PofnoR26=2097
###############
#RCP SCENARIOS#
###############
file = open("../../DATA/RCP_Data/RCP45.dat","r")
lines = file.readlines()
RCP45=N.array(lines,'f')
file = open("../../DATA/RCP_Data/RCP85.dat","r")
lines = file.readlines()
RCP85=N.array(lines,'f')
file = open("../../DATA/RCP_Data/RCP60.dat","r")
lines = file.readlines()
RCP60=N.array(lines,'f')
file = open("../../DATA/RCP_Data/RCP26.dat","r")
lines = file.readlines()
RCP26=N.array(lines,'f')
###################
#EXPONENTIAL DECAY#
###################
def f(t,C_0):
y=(C_0-400.)*N.exp(-t/25.)+400.
return y
tt=N.arange(2000,2200)
############
#PLOT RCP85#
############
plt.plot(tt,RCP85,color='#9D0444')
plt.plot(tt[PofnoR85-2000:],f(N.arange(len(tt[PofnoR85-2000:])),RCP85[PofnoR85-2000]),color='#9D0444',linestyle=':')
plt.scatter(tt[PofnoR85-2000:][0],RCP85[PofnoR85-2000], marker='o',s=90, color='#9D0444',linestyle=':',label='RCP8.5 $\pi_t=2047$')
############
#PLOT RCP60#
############
plt.plot(tt,RCP60,color='#F37C51')
plt.plot(tt[PofnoR60-2000:],f(N.arange(len(tt[PofnoR60-2000:])),RCP60[PofnoR60-2000]),color='#F37C51',linestyle=':')
plt.scatter(tt[PofnoR60-2000:][0],RCP60[PofnoR60-2000], marker='o',s=90, color='#F37C51',linestyle=':',label='RCP6.0 $\pi_t=2057$')
############
#PLOT RCP45#
############
plt.plot(tt,RCP45,color='#0066FF')
plt.plot(tt[PofnoR45-2000:],f(N.arange(len(tt[PofnoR45-2000:])),RCP45[PofnoR45-2000]),color='#0066FF',linestyle=':')
plt.scatter(tt[PofnoR45-2000:][0],RCP45[PofnoR45-2000], marker='o',s=90, color='#0066FF',linestyle=':',label='RCP4.5 $\pi_t=2055$')
############
#PLOT RCP26#
############
plt.plot(tt,RCP26,color='#148F77')
plt.plot(tt[PofnoR26-2000:],f(N.arange(len(tt[PofnoR26-2000:])),RCP26[PofnoR26-2000]),color='#148F77',linestyle=':')
plt.scatter(tt[PofnoR26-2000:][0],RCP26[PofnoR26-2000], marker='o',s=90, color='#148F77',linestyle=':',label='RCP2.6 $\pi_t=2097$')
####################
#CUSTOMIZING LAYOUT#
####################
plt.xticks(fontsize='16')
plt.yticks(fontsize='16')
plt.xlabel("Year", fontsize=20)
plt.ylabel('CO$_2$eq (ppmv)',fontsize='20')
plt.gcf().subplots_adjust(bottom=0.13)
#plt.legend(scatterpoints=1, loc=1, fontsize='16')
plt.legend(scatterpoints=1, fontsize='16', loc=3, ncol=2)#, mode='expand', scatterpoints=1, fontsize='16')
plt.axis([2000,2150,250,750])
plt.show()
| gpl-3.0 |
HeraclesHX/scikit-learn | sklearn/ensemble/tests/test_voting_classifier.py | 140 | 6926 | """Testing for the VotingClassifier (sklearn.ensemble.voting_classifier)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
from sklearn import cross_validation
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_validation.cross_val_score(eclf,
X,
y,
cv=5,
scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
assert_equal(all(clf1.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf2.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
assert_equal(all(clf3.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
assert_equal(all(eclf.fit(X, y).predict(X)), all([1, 1, 1, 2, 2, 2]))
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
| bsd-3-clause |
MattWellie/PAGE_MPO | tsv_gene_names_grab.py | 1 | 3555 | import csv, cPickle
import numpy as np
import matplotlib.pyplot as plt
"""
Something quick to get a set of gene names from a TSV file
"""
file_in = 'batch_query_no_infertile.tsv'
field = 'human_gene_symbol'
ddg2p = 'DDG2P.csv'
annotations = 'annotations.cPickle'
all_output = 'tsv_names_summary_out.txt'
gene_set = set()
gene_duplicates = set()
printed_lines = []
# Import the file
with open(file_in, 'rU') as handle:
    reader = csv.DictReader(handle, delimiter='\t')
    for row in reader:
gene_list = row[field].split('|')
printed_lines.append('{} - {}: {}'.format(row['mp_id'], row['mp_definition'], len(gene_list)))
for gene in gene_list:
if gene in gene_set:
gene_duplicates.add(gene)
else:
gene_set.add(gene)
printed_lines.append('Unique genes found: {}'.format(len(gene_set)))
printed_lines.append('{} genes were present in multiple categories:\n'.format(len(gene_duplicates)))
printed_lines.append(gene_duplicates)
# Dump the gene set to a pickle file
with open('genes_of_interest.cPickle', 'w') as handle:
cPickle.dump(gene_set, handle)
# Grab all the gene names from the DDG2P input file
ddg2p_set = set()
first_line = True
with open(ddg2p, 'r') as handle:
for line in handle:
if first_line:
first_line = False
else:
ddg2p_set.add(line.split(',')[0])
# Identify any overlapping genes:
ddg2p_overlap = set()
for gene in gene_set:
if gene in ddg2p_set:
ddg2p_overlap.add(gene)
# Dump the overlapping gene set to a pickle file
with open('ddg2p_overlap_genes.cPickle', 'w') as handle:
cPickle.dump(ddg2p_overlap, handle)
printed_lines.append('Total phenotype genes overlapping DDG2P: {}'.format(len(ddg2p_overlap)))
printed_lines.append(ddg2p_overlap)
# Import and use the pickled set of annotations from the DDD project
# This contains the HI, HS, and phenotype details where available
with open(annotations, 'r') as handle:
anno_dict = cPickle.load(handle)
# Create a list to hold all the HI scores
hi_scores = []
annotated_genes = set()
not_found = set()
for gene in ddg2p_overlap:
found = False
for chromosome in anno_dict:
if gene in anno_dict[chromosome]:
found = True
annotated_genes.add(gene)
printed_lines.append('\nHI Gene Annotations for {}'.format(gene))
ann_keys = anno_dict[chromosome][gene].keys()
if 'hi_score' in ann_keys:
printed_lines.append('\tHI: {}'.format(anno_dict[chromosome][gene]['hi_score']))
hi_scores.append(float(anno_dict[chromosome][gene]['hi_score']))
if 'hs_score' in ann_keys:
printed_lines.append('\tHS: {}'.format(anno_dict[chromosome][gene]['hs_score']))
if 'diseases' in ann_keys:
for disease in anno_dict[chromosome][gene]['diseases']:
printed_lines.append('\t{}'.format(disease))
if not found:
not_found.add(gene)
printed_lines.append('\n{}/{} Genes had annotations available'.format(len(annotated_genes), len(ddg2p_overlap)))
printed_lines.append('{} Genes didn\'t have annotations:'.format(len(not_found)))
printed_lines.append(not_found)
with open(all_output, 'wb') as handle:
for line in printed_lines:
print >>handle, line
# Maybe try and plot this as a graph
line = plt.figure()
plt.plot(sorted(hi_scores), 'o')
plt.ylabel('HI Score')
plt.xlabel('Gene (sorted by HI score)')
plt.title('A scatter plot of all HI scores')
plt.show() | apache-2.0 |
zigitax/king-phisher | tools/cx_freeze.py | 3 | 4410 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tools/cx_freeze.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import site
import sys
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from king_phisher import version
import matplotlib
from mpl_toolkits import basemap
from cx_Freeze import setup, Executable
is_debugging_build = bool(os.environ.get('DEBUG'))
include_dll_path = os.path.join(site.getsitepackages()[1], 'gnome')
# DLLs from site-packages\gnome\
missing_dlls = [
'libaspell-15.dll',
'libatk-1.0-0.dll',
'libcairo-gobject-2.dll',
'libdbus-1-3.dll',
'libdbus-glib-1-2.dll',
'libenchant.dll',
'lib\enchant\libenchant_aspell.dll',
'lib\enchant\libenchant_hspell.dll',
'lib\enchant\libenchant_ispell.dll',
'lib\enchant\libenchant_myspell.dll',
'lib\enchant\libenchant_voikko.dll',
'libffi-6.dll',
'libfontconfig-1.dll',
'libfreetype-6.dll',
'libgailutil-3-0.dll',
'libgdk-3-0.dll',
'libgdk_pixbuf-2.0-0.dll',
'libgeoclue-0.dll',
'libgio-2.0-0.dll',
'lib\gio\modules\libgiolibproxy.dll',
'libgirepository-1.0-1.dll',
'libglib-2.0-0.dll',
'libgmodule-2.0-0.dll',
'libgobject-2.0-0.dll',
'libgstapp-1.0-0.dll',
'libgstaudio-1.0-0.dll',
'libgstbase-1.0-0.dll',
'libgstpbutils-1.0-0.dll',
'libgstreamer-1.0-0.dll',
'libgsttag-1.0-0.dll',
'libgstvideo-1.0-0.dll',
'libgtk-3-0.dll',
'libgtksourceview-3.0-1.dll',
'libharfbuzz-gobject-0.dll',
'libintl-8.dll',
'libjavascriptcoregtk-3.0-0.dll',
'libjpeg-8.dll',
'liborc-0.4-0.dll',
'libpango-1.0-0.dll',
'libpangocairo-1.0-0.dll',
'libpangoft2-1.0-0.dll',
'libpangowin32-1.0-0.dll',
'libpng16-16.dll',
'libproxy.dll',
'libpyglib-gi-2.0-python27-0.dll',
'librsvg-2-2.dll',
'libsoup-2.4-1.dll',
'libsqlite3-0.dll',
'libwebkitgtk-3.0-0.dll',
'libwebp-4.dll',
'libwinpthread-1.dll',
'libxml2-2.dll',
'libzzz.dll',
]
include_files = []
for dll in missing_dlls:
include_files.append((os.path.join(include_dll_path, dll), dll))
gtk_libs = ['etc', 'lib', 'share']
for lib in gtk_libs:
include_files.append((os.path.join(include_dll_path, lib), lib))
include_files.append((matplotlib.get_data_path(), 'mpl-data'))
include_files.append((basemap.basemap_datadir, 'mpl-basemap-data'))
include_files.append(('data/client/king_phisher', 'king_phisher'))
exe_base = 'Win32GUI'
if is_debugging_build:
exe_base = 'Console'
executables = [
Executable(
'KingPhisher',
base=exe_base,
icon='data/client/king_phisher/king-phisher-icon.ico',
shortcutName='KingPhisher',
shortcutDir='ProgramMenuFolder'
)
]
build_exe_options = dict(
compressed=False,
include_files=include_files,
packages=[
'cairo',
'email',
'gi',
'jinja2',
'matplotlib',
'mpl_toolkits',
'msgpack',
'paramiko'
]
)
setup(
name='KingPhisher',
author='Spencer McIntyre',
version=version.distutils_version,
description='King Phisher Client',
options=dict(build_exe=build_exe_options),
executables=executables
)
| bsd-3-clause |
elliottd/vdrparser | PrepareImages.py | 1 | 1430 | import pandas as pd
import os
import sys
from collections import defaultdict
import argparse
import glob
class PrepareImages:
def __init__(self, args):
self.args = args
    def prepare(self):
        '''
        Splits a collection of HDF-formatted files into one per image.
        '''
files = glob.glob("%s/*.h5" % self.args.path)
files = sorted(files)
for idx,x in enumerate(files):
print "Loading HDF5 files ... "
ndf = pd.read_hdf("%s" % (x), 'df')
# HDF always stores absolute paths but we don't want to deal with that so strip them out.
ndf.index = [x.split("/")[-1] for x in ndf.index]
image_cols = defaultdict(list)
for idx,x in enumerate(ndf.index):
image_cols[x].append(idx)
for x in image_cols.keys():
data = ndf[image_cols[x][0]:image_cols[x][-1]]
ofilename = x.split("/")[-1]
ofilename = ofilename.replace("jpg", "hdf")
print "Rewriting %s" % ofilename
data.to_hdf("%s/%s" % (self.args.path, ofilename), 'df')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Process the raw HDF-processed data into a separate .hdf file for each image.")
parser.add_argument("--path", help="Path to the input HDF files. Only reads from .h5 extensions", required=True)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
p = PrepareImages(parser.parse_args())
p.prepare()
| apache-2.0 |
droundy/deft | papers/fuzzy-fmt/figs/plot-weights.py | 1 | 2125 | #!/usr/bin/python
# We need the following two lines in order for matplotlib to work
# without access to an X server.
import matplotlib, sys
if 'show' not in sys.argv:
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import findxi
import scipy.special
T = 1.0
alpha = findxi.find_alpha(T)
Xi = findxi.find_Xi(T)
data = np.loadtxt('figs/weight-functions-%g.dat' % T)
# x y z n n3 n2 n1 n0 n2x n2y n2z n2xx n2yy n2zz n2xy n2yz n2zx
x = data[:,0]
y = data[:,1]
z = data[:,2]
n = data[:,3]
n3 = data[:,4]
n2 = data[:,5]
n1 = data[:,6]
n0 = data[:,7]
n2x = data[:,8]
n2y = data[:,9]
n2z = data[:,10]
n2xx = data[:,11]
n2yy = data[:,12]
n2zz = data[:,13]
n2xy = data[:,14]
n2yz = data[:,15]
n2zx = data[:,16]
print(z)
r = np.sqrt(x**2+y**2+z**2)
def plotone(f,name):
plt.ylabel(name)
plt.xlabel('z')
plt.plot(z, f, label=name)
w2 = np.sqrt(2/np.pi)/Xi*np.exp(-((r-alpha/2)/(Xi/np.sqrt(2)))**2)
plt.figure()
plotone(n, 'n')
plotone(n0, 'n0')
plt.plot(z, w2/(4*np.pi*r**2), '--', label='theory')
plt.plot(z, w2/(4*np.pi*r**2)/2, '--', label='half theory')
plt.ylim(0, 1.2*n0.max())
plt.legend(loc='best')
plt.figure()
plotone(n3, 'n3')
plt.plot(z, 0.5*(1-scipy.special.erf((r-alpha/2)/(Xi/np.sqrt(2)))), '--', label='theory')
plt.legend(loc='best')
plt.figure()
plotone(n2, 'n2')
plt.plot(z, w2, '--', label='theory')
plt.legend(loc='best')
plt.figure()
plotone(n1, 'n1')
plt.plot(z, w2/(4*np.pi*r), '--', label='theory')
plt.legend(loc='best')
plt.figure()
plotone(n2x, 'n2x')
plt.plot(z, w2*x/r, '--', label='x theory')
plotone(n2y, 'n2y')
plt.plot(z, w2*y/r, '--', label='y theory')
plotone(n2z, 'n2z')
plt.plot(z, w2*z/r, '--', label='z theory')
plt.plot(z, w2*z/r/2, '--', label='half z theory')
plt.legend(loc='best')
plt.figure()
plotone(n2xx, 'n2xx')
plt.plot(z, w2*(x**2/r**2 - 1./3), '--', label='xx theory')
plotone(n2yy, 'n2yy')
plt.plot(z, w2*(y**2/r**2 - 1./3), '--', label='yy theory')
plotone(n2zz, 'n2zz')
plt.plot(z, w2*(z**2/r**2 - 1./3), '--', label='zz theory')
plotone(n2xy, 'n2xy')
plotone(n2yz, 'n2yz')
plotone(n2zx, 'n2zx')
plt.legend(loc='best')
plt.show()
| gpl-2.0 |
bongsoos/pythontools | pythontools/plottools.py | 1 | 7705 | '''
plottools.py
Plotting tools
Tools
-----
makefig
set_axis
simpleaxis
get_color
set_label
savefig
author: Bongsoo Suh
created: 2015-09-01
(C) 2015 bongsoos
'''
import numpy as _np
from matplotlib import pyplot as _plt
from mpl_toolkits.axes_grid1 import make_axes_locatable as _mal
import matplotlib.patches as _mpatches
import matplotlib.colors as _colors
import matplotlib.cm as _cmx
import matplotlib as mpl
NCURVES = 10
_np.random.seed(101)
curves = [_np.random.random(20) for i in range(NCURVES)]
values = range(NCURVES)
def get_color(cmap, idx):
'''
get color
'''
colormap = _plt.get_cmap(cmap)
cNorm = _colors.Normalize(vmin=0, vmax=values[-1])
scalarMap = _cmx.ScalarMappable(norm=cNorm, cmap=colormap)
colorVal = scalarMap.to_rgba(values[idx])
return colorVal
def simpleaxis(ax):
'''
simple axis
'''
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
def set_axis(ax, ax_disp=['left','bottom'], x_ticks=None, x_ticks_label=None, y_ticks=None, y_ticks_label=None,
lbsize=13, islabel=True, ax_linewidth=0.8, ax_tickwidth=0.8, xlim=None, ylim=None, islog_x=False, islog_y=False):
'''
Axis setup.
Usage
-----
set_axis(ax, x_ticks=[0, 1], y_ticks=[0, 2])
set_axis(ax, x_ticks=[0, 1], y_ticks=[0, 2], islabel=False) # this will not display axis labels
set_axis(ax, x_ticks=[0, 1], y_ticks=[0, 2], xlim=[0,5], ylim=[0,1]) # this will not display axis labels
Inputs
------
ax (object):
Pass the figure axis object.
ax_disp (list):
Choose axis that you want to display. 'top', 'right' 'left','bottom'
Default = ['left', 'bottom']
x_ticks (list):
x tick location. ex) [-1, 0, 1]
y_ticks (list):
y tick location. ex) [-1, 0, 1]
x_ticks_label (list):
x tick label. ex) [-1, 0, 1]
y_ticks_label (list):
y tick label. ex) [-1, 0, 1]
lbsize (int):
label size (default = 20)
islabel (bool):
Select to display the axis labels(True) or not(False).
ax_linewidth (int):
axis line width (default = 2)
ax_tickwidth (int):
axis tick width (default = 2)
xlim (list):
x axis range. ex) [-1, 1]
ylim (list):
y axis range. ex) [0, 1]
islog_x (bool):
set x axis into log scale
islog_y (bool):
set y axis into log scale
'''
# set axis
all_axes = ['top','right','left','bottom']
for d in all_axes:
if d not in ax_disp:
ax.spines[d].set_visible(False)
for d in ax_disp:
ax.spines[d].set_linewidth(ax_linewidth)
# set ticks
# axis: changes apply to 'x' or 'y' axis
# which: 'both'. both major and minor ticks are affected
# top, bottom, left, right: 'off'. ticks along the axis edges are off
# labelbottom, labelright: 'off'. turn off the label
    if 'top' not in ax_disp and 'right' not in ax_disp:
ax.tick_params(axis='x', which='both', top='off', labelsize=lbsize, width=ax_tickwidth)
ax.tick_params(axis='y', which='both', right='off', labelright='off', labelsize=lbsize, width=ax_tickwidth)
# set tick labels
if x_ticks is not None:
ax.set_xticks(x_ticks)
if y_ticks is not None:
ax.set_yticks(y_ticks)
if x_ticks_label is not None:
ax.set_xticklabels(x_ticks_label)
if y_ticks_label is not None:
ax.set_yticklabels(y_ticks_label)
if not islabel:
ax.tick_params(labelbottom='off',labelleft='off')
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
if islog_x:
ax.set_xscale('log')
if islog_y:
ax.set_yscale('log')
return
def set_label(ax, title=' ', x_label=' ', y_label=' ', ftsize=13, title_ftsize=None, title_offset=1.05, islabel=True):
'''
Set labels
Inputs
------
ax (object):
axis object
title (string)
x_label (string)
y_label (string)
ftsize (int):
font size for title, x_label, y_label. Default = 20
lbsize (int):
label size for axis. Default = 20
'''
if islabel:
if title_ftsize is None:
ax.set_title(title, fontsize=ftsize, y=title_offset)
else:
ax.set_title(title, fontsize=title_ftsize, y=title_offset)
ax.set_xlabel(x_label, fontsize=ftsize)
ax.set_ylabel(y_label, fontsize=ftsize)
return
# fig_class
def makefig(figsize=(5,5), num_plots=1, dpi=500):
'''
Make figure
Usage
-----
makefig()
makefig(figsize=(3,3))
Inputs
------
figsize (tuple):
figure size (width, height)
ex) (3,3)
num_plots (int):
number of plots(subplots) in the figure. Default = 1
title (string):
x_label (string):
y_label (string):
Outputs
-------
fig (object):
figure object
ax (object, dictionary):
axis object[dictionary].
For num_plots=1 case, returns one ax object.
For num_plots>1 case, returns dictionary. Axes can be accessed using the key values
starting from 0 to num_plots-1. ex) ax[0]~ax[num_plots-1].
'''
    if num_plots == 1:
        fig = _plt.figure(figsize=figsize, dpi=dpi)
        ax = fig.add_subplot(111)
        # fig.tight_layout()
    else:
        ### figure subplot grid (row = x, col = y)
        plotsize_x = _np.int(_np.ceil(num_plots/3))
        plotsize_y = _np.int(_np.ceil(num_plots/plotsize_x))
        figsize_x = figsize[1]
        figsize_y = figsize[0]
        fig = _plt.figure(figsize=(plotsize_y*figsize_y, plotsize_x*figsize_x), dpi=dpi)
ax = {}
for i in range(num_plots):
ax[i] = fig.add_subplot(plotsize_x, plotsize_y, i+1)
fig.tight_layout()
# return fig, ax objects
return fig, ax
def savefig(fig, filename=None, dpi=500, tight=False):
'''
save figure
Usage
-----
savefig(fig, 'fig1') # this will save fig into fig1.png file.
Inputs
------
fig (object):
figure object created using makefig or matplotlib pyplot.
filename (string):
file name that figure will be saved.
'''
if tight:
fig.tight_layout()
if filename is not None:
fig.savefig(filename, bbox_inches='tight', dpi=dpi)
else:
print("File name must be given. Usage: ex) savefig(fig, 'fig1')" )
return
def text(ax, string, pos=[0.3,0.65], color='k', ftsize=8):
'''
insert text
'''
ax.text(pos[0], pos[1], string, verticalalignment='bottom', horizontalalignment='left',transform=ax.transAxes, color=color, fontsize=ftsize)
def scatter(ax, x, y, z, ms=20, _marker_='o', _cmap_='jet'):
'''
scatter plot
'''
    scat = ax.scatter(x, y, s=ms, c=z, marker=_marker_, cmap=_cmap_, linewidths=0)
cbar = _plt.colorbar(scat)
return cbar
def set_cbar(cbar, ticks=None, ticklabels=None, limits=None, label=None, ftsize=12, lbsize=12):
'''
set colorbar
'''
if ticks is not None:
cbar.set_ticks(ticks)
if ticklabels is not None:
cbar.ax.set_yticklabels(ticklabels, fontsize=ftsize)
cbar.ax.tick_params(labelsize=lbsize)
if label is not None:
cbar.set_label(label, size=ftsize)
if limits is not None:
cbar.set_clim(limits)
return
| mit |
tgquintela/Mscthesis | Tests/test_project1.py | 1 | 5201 |
import numpy as np
from pySpatialTools.Geo_tools import general_projection
from pythonUtils.Logger import Logger
#### 1. Prepare empresas
from Mscthesis.IO import Firms_Parser
from Mscthesis.Preprocess import Firms_Preprocessor
parentpath = '/home/tono/mscthesis/code/Data/pruebas_clean'
logfile = '/home/tono/mscthesis/code/Data/Outputs/Logs/log_clean.log'
## Parse
logger = Logger(logfile)
parser = Firms_Parser(logger)
empresas, typevars = parser.parse(parentpath, year=2006)
## Preprocess
preprocess = Firms_Preprocessor(typevars, logger)
empresas = preprocess.preprocess(empresas)
### Prepare municipios
from Mscthesis.IO import Municipios_Parser
from pySpatialTools.Interpolation import general_density_assignation
from pySpatialTools.Retrieve import KRetriever
from pySpatialTools.Interpolation.density_assignation_process import \
DensityAssign_Process
from pySpatialTools.Interpolation.density_utils import population_assignation_f
#from Mscthesis.Preprocess.comp_complementary_data import population_assignation_f, compute_population_data
# municipios file
mpiosfile = '/home/tono/mscthesis/code/Data/municipios_data/municipios-espana_2014_complete.csv'
mparser = Municipios_Parser(None)
data, typ = mparser.parse(mpiosfile)
params = {'f_weights': 'exponential', 'params_w': {'max_r': 10.}, 'f_dens': population_assignation_f, 'params_d': {}}
params_proj = {'method': 'ellipsoidal', 'inverse': False, 'radians': False}
data.loc[:, typ['loc_vars']] = general_projection(data, typ['loc_vars'], **params_proj)
locs = empresas[typevars['loc_vars']]
retriever = KRetriever
info_ret = np.ones(locs.shape[0]).astype(int)*3
## ProcessClass
pop_assign = DensityAssign_Process(logger, retriever)
m = pop_assign.compute_density(locs, data, typ, info_ret, params)
###########################################
## TODO: Join function
## TODO: Discrete vars, continuous vars?
empresas['population_idx'] = m
typevars['pop_var'] = 'population_idx'
########################################
#### 2. Compute model descriptors
from pySpatialTools.IO import create_reindices
from Mscthesis.Preprocess import create_info_ret, create_cond_agg
from pySpatialTools.Preprocess import Aggregator
from pySpatialTools.Retrieve import CircRetriever, Neighbourhood, KRetriever
#compute_population_data(locs, pop, popvars, retriever, info_ret, params)
## Define aggregator
agg = Aggregator(typevars=typevars)
## Define permuts
permuts = 2
reindices = create_reindices(empresas.shape[0], permuts)
## Define info retriever and conditional aggregator
empresas, typevars = create_info_ret(empresas, typevars)
empresas, typevars = create_cond_agg(empresas, typevars, np.random.randint(0, 2, empresas.shape[0]).astype(bool))
### Applying model
from pySpatialTools.Descriptor_Models import Pjensen, ModelProcess, Countdescriptor
## Define descriptormodel
descriptormodel = Countdescriptor(empresas, typevars)
## Define retriever (Neigh has to know typevars) (TODO: define bool_var)
retriever = CircRetriever(empresas[typevars['loc_vars']].as_matrix())
aggretriever = KRetriever
Neigh = Neighbourhood(retriever, typevars, empresas, reindices, aggretriever, funct=descriptormodel.compute_aggcharacterizers)
del locs
## Define process
modelprocess = ModelProcess(logger, Neigh, descriptormodel, typevars=typevars,
lim_rows=5000, proc_name='Test')
count_matrix = modelprocess.compute_matrix(empresas, reindices)
descriptormodel = Pjensen(empresas, typevars)
modelprocess = ModelProcess(logger, Neigh, descriptormodel, typevars=typevars,
lim_rows=5000, proc_name='Test')
pjensen_matrix = modelprocess.compute_matrix(empresas, reindices)
corrs = modelprocess.compute_net(empresas,reindices)
net = modelprocess.filter_with_random_nets(corrs, 0.03)
### 3. Recommendation
from pySpatialTools.Recommender import PjensenRecommender
from pySpatialTools.Recommender import SupervisedRmodel
from sklearn.cross_validation import KFold
from pySpatialTools.Interpolation.density_assignation import \
general_density_assignation, from_distance_to_weights, compute_measure_i
################### Pjensen ###################
# definition of parameters
feat_arr = np.array(empresas[typevars['feat_vars']])
# Instantiation of the class
recommender = PjensenRecommender()
# Making the predictionmatrix
Q = recommender.compute_quality(net, count_matrix, feat_arr, 0)
Qs, idxs = recommender.compute_kbest_type(net, count_matrix, feat_arr, 5)
################## Supervised #################
# definition of parameters
feat_arr = np.array(empresas[typevars['feat_vars']])
# Instantiation of the class
recommender = SupervisedRmodel(modelcl, pars_model, cv, pars_cv)
model, measure = recommender.fit_model(pjensen_matrix, feat_arr)
Q = recommender.compute_quality(pjensen_matrix)
################## Supervised #################
# definition of parameters
feat_arr = np.array(empresas[typevars['feat_vars']])
weights_f = lambda
general_density_assignation(locs, retriever, info_ret, values, f_weights,
params_w, f_dens, params_d)
# Instantiation of the class
recommender = NeighRecommender(retriever, weights_f)
| mit |
loretoparisi/docker | theano/rsc15/run_rsc15.py | 1 | 3946 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 6 18:14:46 2016
@author: Balázs Hidasi
@lastmodified: Loreto Parisi (loretoparisi at gmail dot com)
"""
import sys
import os
sys.path.append('../..')
import numpy as np
import pandas as pd
import gru4rec
import evaluation
from theano.misc.pkl_utils import dump,load
# To redirect output to file
class Logger(object):
def __init__(self, filename="Default.log"):
self.terminal = sys.stdout
self.log = open(filename, "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
sys.stdout = Logger( os.environ['HOME' ] + '/theano.log' )
PATH_TO_TRAIN = os.environ['HOME']+'/rsc15_train_full.txt'
PATH_TO_TEST = os.environ['HOME']+'/rsc15_test.txt'
LAYERS = 1
EPOCHS = 1
BATCH_SIZE = 50
if __name__ == '__main__':
data = pd.read_csv(PATH_TO_TRAIN, sep='\t', dtype={'ItemId':np.int64})
valid = pd.read_csv(PATH_TO_TEST, sep='\t', dtype={'ItemId':np.int64})
print('Training GRU4Rec with ' + str(LAYERS) + ' hidden units and ' + str(EPOCHS) + ' epochs')
#Reproducing results from "Session-based Recommendations with Recurrent Neural Networks" on RSC15 (http://arxiv.org/abs/1511.06939)
gru = gru4rec.GRU4Rec(loss='top1',
final_act='tanh',
hidden_act='tanh',
layers=[LAYERS],
batch_size=BATCH_SIZE,
dropout_p_hidden=0.5,
learning_rate=0.01,
momentum=0.0,
n_epochs=EPOCHS,
time_sort=False)
gru.fit(data)
res = evaluation.evaluate_sessions_batch(gru, valid, None)
print('Recall@20: {}'.format(res[0]))
print('MRR@20: {}'.format(res[1]))
#Reproducing results from "Recurrent Neural Networks with Top-k Gains for Session-based Recommendations" on RSC15 (http://arxiv.org/abs/1706.03847)
# gru = gru4rec.GRU4Rec(loss='bpr-max-0.5',
# final_act='linear',
# hidden_act='tanh',
# layers=[LAYERS],
# batch_size=32,
# dropout_p_hidden=0.0,
# learning_rate=0.2,
# momentum=0.5,
# n_sample=2048,
# sample_alpha=0,
# n_epochs=EPOCHS,
# time_sort=True)
# gru.fit(data)
# res = evaluation.evaluate_sessions_batch(gru, valid, None)
# print('Recall@20: {}'.format(res[0]))
# print('MRR@20: {}'.format(res[1]))
#
# LP: calculate predictions on a batch of sessions items
# break_ties: boolean
# Whether to add a small random number to each prediction value in order to break up possible ties, which can mess up the evaluation.
# GRU4Rec usually does not produce ties, except when the output saturates;
break_ties=True
# batch_size : int
# Number of events bundled into a batch during evaluation. Speeds up evaluation.
# If it is set high, the memory consumption increases. Default value is 100.
batch_size=10
session_ids = valid.SessionId.values[0:batch_size]
input_item_ids = valid.ItemId.values[0:batch_size]
out_idx = valid.ItemId.values[0:batch_size]
uniq_out = np.unique(np.array(out_idx, dtype=np.int32))
#predict_for_item_ids = np.hstack([data, uniq_out[~np.in1d(uniq_out,data)]])
#LP: comment this if above works!
predict_for_item_ids = None
print('session_ids: {}'.format(session_ids))
print('input_item_ids: {}'.format(input_item_ids))
print('uniq_out: {}'.format(uniq_out))
print('predict_for_item_ids: {}'.format(predict_for_item_ids))
preds = gru.predict_next_batch(session_ids, input_item_ids, predict_for_item_ids, batch_size)
preds.fillna(0, inplace=True)
if break_ties:
preds += np.random.rand(*preds.values.shape) * 1e-8
print('Preds: {}'.format(preds))
# save model
fd = open(os.environ['HOME'] + '/model.theano', 'wb')
dump(gru,fd)
fd.close()
print('Model: {}'.format(gru))
| mit |
CameronTEllis/brainiak | brainiak/reconstruct/iem.py | 2 | 18276 | # Copyright 2018 David Huberdeau & Peter Kok
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inverted Encoding Model (IEM)
Method to decode and reconstruct features from data.
The implementation is roughly based on the following publications:
[Kok2013] "1.Kok, P., Brouwer, G. J., Gerven, M. A. J. van &
Lange, F. P. de. Prior Expectations Bias Sensory Representations
in Visual Cortex. J. Neurosci. 33, 16275–16284 (2013).
[Brouwer2011] "2.Brouwer, G. J. & Heeger, D. J. Cross-orientation
suppression in human visual cortex. J. Neurophysiol. 106(5):
2108-2119 (2011).
[Brouwer2009] "3.Brouwer, G. J. & Heeger, D. J.
Decoding and Reconstructing Color from Responses in Human Visual
Cortex. J. Neurosci. 29, 13992–14003 (2009).
This implementation uses a set of sinusoidal
basis functions to represent the set of possible feature values.
A feature value is some characteristic of a stimulus, e.g. the
angular location of a target along a horizontal line. This code was
written to give some flexibility compared to the specific instances
in Kok, 2013 & in Brouwer, 2009. Users can set the number of basis
functions, or channels, and the range of possible feature values.
"""
# Authors: David Huberdeau (Yale University) &
# Peter Kok (Yale University), 2018 &
# Vy Vo (Intel Corp., UCSD), 2019
import logging
import warnings
import numpy as np
import scipy.stats
from sklearn.base import BaseEstimator
from ..utils.utils import circ_dist
__all__ = [
"InvertedEncoding",
]
logger = logging.getLogger(__name__)
MAX_CONDITION_CHECK = 9000
class InvertedEncoding(BaseEstimator):
"""Basis function-based reconstruction method
Inverted encoding models (alternatively known as forward
models) are used to reconstruct a feature, e.g. color of
a stimulus, from patterns across voxels in functional
data. The model uses n_channels number of idealized
basis functions and assumes that the transformation from
stimulus feature (e.g. color) to basis function is one-
to-one and invertible. The response of a voxel is
expressed as the weighted sum of basis functions.
In this implementation, basis functions were half-wave
rectified sinusoid functions raised to a power set by
the user (e.g. 6).
The model:
Inverted encoding models reconstruct a stimulus feature from
patterns of BOLD activity by relating the activity in each
voxel, B, to the values of hypothetical channels (or basis
functions), C, according to Equation 1 below.
(1) B = W*C
where W is a weight matrix that represents the relationship
between BOLD activity and Channels. W must be estimated from
training data; this implementation (and most described in the
literature) uses linear regression to estimate W as in Equation
2 below [note: inv() represents matrix inverse or
pseudo-inverse].
(2) W_est = B_train*inv(C_train)
The weights in W_est (short for "estimated") represent the
contributions of each channel to the response of each voxel.
Estimated channel responses can be computed given W_est and
new voxel activity represented in matrix B_exp (short for
"experiment") through inversion of Equation 1:
(3) C_est = inv(W_est)*B_exp
    Given estimated channel responses, C_est, it is straightforward
to obtain the reconstructed feature value by summing over
channels multiplied by their channel responses and taking the
argmax (i.e. the feature associated with the maximum value).
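    A rough numpy sketch of equations (1)-(3) with hypothetical array
    shapes (illustration only; it is not part of this class's API):
        import numpy as np
        rng = np.random.RandomState(0)
        B_train = rng.rand(40, 10)   # [observations, voxels]
        C_train = rng.rand(40, 6)    # [observations, channels]
        # Equation (2): W_est = B_train * inv(C_train)
        W_est = B_train.T @ np.linalg.pinv(C_train.T)    # [voxels, channels]
        B_exp = rng.rand(5, 10)      # new observations
        # Equation (3): C_est = inv(W_est) * B_exp
        C_est = np.linalg.pinv(W_est) @ B_exp.T          # [channels, observations]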
Using this model:
Use fit() to estimate the weights of the basis functions given
input data (e.g. beta values from fMRI data). This function
will execute equation 2 above.
Use predict() to compute predicted stimulus values
from new functional data. This function computes estimated
channel responses, as in equation 3, then computes summed
channel output and finds the argmax (within the stimulus
feature space) associated with those responses.
Use score() to compute a measure of the error of the prediction
based on known stimuli.
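    A minimal usage sketch on synthetic data (shapes and feature values
    are arbitrary and only meant to illustrate the fit/predict/score calls):
        import numpy as np
        rng = np.random.RandomState(0)
        X_train = rng.rand(60, 20)                       # [observations, voxels]
        y_train = rng.choice(np.arange(0, 180, 20), 60)  # feature in degrees
        X_test = rng.rand(10, 20)
        y_test = rng.choice(np.arange(0, 180, 20), 10)
        iem = InvertedEncoding(n_channels=6, stimulus_mode='halfcircular')
        iem.fit(X_train, y_train)
        predicted = iem.predict(X_test)
        r2 = iem.score(X_test, y_test)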
This implementation assumes a circular (or half-
circular) feature domain. Future implementations might
generalize the feature input space, and increase the
possible dimensionality.
Parameters
----------
    n_channels: int, default 6. Number of channels
The number of channels, or basis functions, to be used in
the inverted encoding model.
    channel_exp: int, default 5. Basis function exponent.
The exponent of the sinuoidal basis functions, which
establishes the width of the functions.
stimulus_mode: str, default 'halfcircular' (other option is
'circular'). Describes the feature domain.
range_start: double, default 0. Lowest value of domain.
Beginning value of range of independent variable
(usually degrees).
range_stop: double, default 180. Highest value of domain.
Ending value of range of independent variable
(usually degrees).
channel_density: int, default 180. Number of points in the
feature domain.
stimulus_resolution: double, default None will set the stimulus
resolution to be identical to the channel density. This sets
the resolution at which the stimuli were presented (e.g. a
spatial position with some width has a lower stimulus
resolution).
Attributes
----------
channels_: [n_channels, channel density] NumPy 2D array
matrix defining channel values
W_: sklearn.linear_model model containing weight matrix that
relates estimated channel responses to response amplitude
data
"""
def __init__(self, n_channels=6, channel_exp=5,
stimulus_mode='halfcircular', range_start=0.,
range_stop=180., channel_density=180,
stimulus_resolution=None):
self.n_channels = n_channels
self.channel_exp = channel_exp
self.stimulus_mode = stimulus_mode
self.range_start = range_start
self.range_stop = range_stop
self.channel_density = channel_density
self.channel_domain = np.linspace(range_start, range_stop-1,
channel_density)
if stimulus_resolution is None:
self.stim_res = channel_density
else:
self.stim_res = stimulus_resolution
self._check_params()
def _check_params(self):
if self.range_start >= self.range_stop:
raise ValueError("range_start {} must be less than "
"{} range_stop.".format(self.range_start,
self.range_stop))
if self.stimulus_mode == 'halfcircular':
if (self.range_stop - self.range_start) != 180.:
raise ValueError("For half-circular feature spaces,"
"the range must be 180 degrees, "
"not {}".format(self.range_stop
- self.range_start))
elif self.stimulus_mode == 'circular':
if (self.range_stop - self.range_start) != 360.:
raise ValueError("For circular feature spaces, the"
" range must be 360 degrees"
"not {}".format(self.range_stop
- self.range_start))
if self.n_channels < 2:
raise ValueError("Insufficient number of channels.")
if not np.isin(self.stimulus_mode, ['circular',
'halfcircular']):
raise ValueError("Stimulus mode must be one of these: "
"'circular', 'halfcircular'")
def fit(self, X, y):
"""Use data and feature variable labels to fit an IEM
Parameters
----------
X: numpy matrix of voxel activation data. [observations, voxels]
Should contain the beta values for each observation or
trial and each voxel of training data.
y: numpy array of response variable. [observations]
Should contain the feature for each observation in X.
"""
# Check that data matrix is well conditioned:
if np.linalg.cond(X) > MAX_CONDITION_CHECK:
logger.error("Data is singular.")
raise ValueError("Data matrix is nearly singular.")
if X.shape[0] < self.n_channels:
logger.error("Not enough observations. Cannot calculate "
"pseudoinverse.")
raise ValueError("Fewer observations (trials) than "
"channels. Cannot compute pseudoinverse.")
# Check that the data matrix is the right size
shape_data = np.shape(X)
shape_labels = np.shape(y)
if len(shape_data) != 2:
raise ValueError("Data matrix has too many or too few "
"dimensions.")
else:
if shape_data[0] != shape_labels[0]:
raise ValueError(
"Mismatched data samples and label samples")
# Define the channels (or basis set)
self.channels_, channel_centers = self._define_channels()
logger.info("Defined channels centered at {} degrees."
.format(np.rad2deg(channel_centers)))
# Create a matrix of channel activations for every observation.
# (i.e., C1 in Brouwer & Heeger 2009.)
C = self._define_trial_activations(y)
# Solve for W in B = WC
self.W_ = X.transpose() @ np.linalg.pinv(C.transpose())
if np.linalg.cond(self.W_) > MAX_CONDITION_CHECK:
logger.error("Weight matrix is nearly singular.")
raise ValueError("Weight matrix is nearly singular.")
return self
def predict(self, X):
"""Use test data to predict the feature
Parameters
----------
X: numpy matrix of voxel activation from test trials
[observations, voxels]. Used to predict feature
associated with the given observation.
Returns
-------
model_prediction: numpy array of estimated feature values.
"""
# Check that the data matrix is the right size
shape_data = np.shape(X)
if len(shape_data) != 2:
raise ValueError("Data matrix has too many or too few "
"dimensions.")
model_prediction = self._predict_features(X)
return model_prediction
def score(self, X, y):
"""Calculate error measure of prediction. Default measurement
is R^2, the coefficient of determination.
Parameters
----------
X: numpy matrix of voxel activation from new data
[observations,voxels]
y: numpy array of responses. [observations]
Returns
-------
score_value: the error measurement between the actual
feature and predicted features.
"""
pred_features = self.predict(X)
if self.stimulus_mode == 'halfcircular':
# multiply features by 2. otherwise doesn't wrap properly
pred_features = pred_features * 2
y = y * 2
ssres = (circ_dist(np.deg2rad(y), np.deg2rad(pred_features))**2).sum()
sstot = (circ_dist(np.deg2rad(y),
np.ones(y.size)*scipy.stats.circmean(np.deg2rad(y))
) ** 2).sum()
score_value = (1 - ssres/sstot)
return score_value
def get_params(self):
"""Returns model parameters.
Returns
-------
params: parameter of this object
"""
return{"n_channels": self.n_channels,
"channel_exp": self.channel_exp,
"stimulus_mode": self.stimulus_mode,
"range_start": self.range_start,
"range_stop": self.range_stop,
"channel_domain": self.channel_domain,
"stim_res": self.stim_res}
def set_params(self, **parameters):
"""Sets model parameters after initialization.
Parameters
----------
parameters: structure with parameters and change values
"""
for parameter, value in parameters.items():
setattr(self, parameter, value)
setattr(self, "channel_domain",
np.linspace(self.range_start, self.range_stop - 1,
self.channel_density))
self._check_params()
return self
def _define_channels(self):
"""Define basis functions (aka channels).
Returns
-------
channels: numpy matrix of basis functions. dimensions are
[n_channels, function resolution].
channel_centers: numpy array of the centers of each channel
"""
channel_centers = np.linspace(np.deg2rad(self.range_start),
np.deg2rad(self.range_stop),
self.n_channels + 1)
channel_centers = channel_centers[0:-1]
# make sure channels are not bimodal if using 360 deg space
if self.stimulus_mode == 'circular':
domain = self.channel_domain * 0.5
centers = channel_centers * 0.5
elif self.stimulus_mode == 'halfcircular':
domain = self.channel_domain
centers = channel_centers
# define exponentiated function
channels = np.asarray([np.cos(np.deg2rad(domain) - cx) **
self.channel_exp
for cx in centers])
# half-wave rectification preserving circularity
channels = abs(channels)
return channels, channel_centers
def _define_trial_activations(self, stimuli):
"""Defines a numpy matrix of predicted channel responses for
each trial/observation.
Parameters
stimuli: numpy array of the feature values for each
observation (e.g., [0, 5, 15, 30, ...] degrees)
Returns
-------
C: matrix of predicted channel responses. dimensions are
number of observations by stimulus resolution
"""
stim_axis = np.linspace(self.range_start, self.range_stop-1,
self.stim_res)
if self.range_start > 0:
stimuli = stimuli + self.range_start
elif self.range_start < 0:
stimuli = stimuli - self.range_start
one_hot = np.eye(self.stim_res)
indices = [np.argmin(abs(stim_axis - x)) for x in stimuli]
stimulus_mask = one_hot[indices, :]
if self.channel_density != self.stim_res:
if self.channel_density % self.stim_res == 0:
stimulus_mask = np.repeat(stimulus_mask, self.channel_density /
self.stim_res)
else:
raise NotImplementedError("This code doesn't currently support"
" stimuli which are not square "
"functions in the feature domain, or"
" stimulus widths that are not even"
"divisors of the number of points in"
" the feature domain.")
C = stimulus_mask @ self.channels_.transpose()
# Check that C is full rank
if np.linalg.matrix_rank(C) < self.n_channels:
warnings.warn("Stimulus matrix is {}, not full rank. May cause "
"issues with stimulus prediction/reconstruction."
.format(np.linalg.matrix_rank(C)), RuntimeWarning)
return C
def _predict_channel_responses(self, X):
"""Computes predicted channel responses from data
(e.g. C2 in Brouwer & Heeger 2009)
Parameters
----------
X: numpy data matrix. [observations, voxels]
Returns
-------
channel_response: numpy matrix of channel responses
"""
channel_response = np.matmul(np.linalg.pinv(self.W_),
X.transpose())
return channel_response
def _predict_feature_responses(self, X):
"""Takes channel weights and transforms them into continuous
functions defined in the feature domain.
Parameters
---------
X: numpy matrix of data. [observations, voxels]
Returns
-------
pred_response: predict response from all channels. Used
to predict feature (e.g. direction).
"""
pred_response = np.matmul(self.channels_.transpose(),
self._predict_channel_responses(X))
return pred_response
def _predict_features(self, X):
"""Predicts feature value (e.g. direction) from data in X.
Takes the maximum of the 'reconstructed' or predicted response
function.
Parameters
---------
X: numpy matrix of data. [observations, voxels]
Returns
-------
pred_features: predicted feature from response across all
channels.
"""
pred_response = self._predict_feature_responses(X)
feature_ind = np.argmax(pred_response, 0)
pred_features = self.channel_domain[feature_ind]
return pred_features
| apache-2.0 |
hodlin/BDA_py_demos | demos_ch6/demo6_3.py | 19 | 1544 | """Bayesian Data Analysis, 3rd ed
Chapter 6, demo 3
Posterior predictive checking
Light speed example with a poorly chosen test statistic
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
# edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
# data
data_path = '../utilities_and_data/light.txt'
y = np.loadtxt(data_path)
# sufficient statistics
n = len(y)
s2 = np.var(y, ddof=1) # Here ddof=1 is used to get the sample estimate.
my = np.mean(y)
# A second example of replications
nsamp = 1000
pps = np.random.standard_t(n-1, size=(n,nsamp))*np.sqrt(s2*(1+1/n)) + my
# Use the sample variance as a test statistic
# This is a poor choice since it corresponds directly to
# the variance parameter in the model which has been fitted
# to the data.
pp = np.var(pps, axis=0, ddof=1)
# ====== plot
plt.hist(pp, 20, label='Variances of the replicated data sets')
plt.axvline(s2, color='#e41a1c', label='Variance of the original data')
plt.yticks(())
plt.title('Light speed example with a poorly chosen test statistic\n'
r'$\operatorname{Pr}(T(y_\mathrm{rep},\theta)\leq T(y,\theta)|y)=0.42$')
plt.legend()
# make room for the title and legend
axis = plt.gca()
axis.set_ylim((0, axis.get_ylim()[1]*1.2))
box = axis.get_position()
axis.set_position([box.x0, box.y0, box.width, box.height * 0.9])
plt.show()
| gpl-3.0 |
rajat1994/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
n_iter_max : int, optional, default: 30
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
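    A toy call on a random low-dimensional embedding, shown only to
    illustrate the expected input shape (real inputs come from a
    spectral embedding):
        import numpy as np
        rng = np.random.RandomState(0)
        toy_vectors = rng.rand(50, 3)   # (n_samples, n_clusters)
        toy_labels = discretize(toy_vectors, random_state=0)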
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plan.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
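    Examples
    --------
    A minimal, illustrative sketch (the toy data, the RBF affinity and the
    parameter values below are arbitrary choices, not recommendations):
    >>> import numpy as np
    >>> from sklearn.cluster import spectral_clustering
    >>> from sklearn.metrics.pairwise import rbf_kernel
    >>> X = np.vstack([np.random.randn(20, 2), np.random.randn(20, 2) + 5])
    >>> affinity = rbf_kernel(X, gamma=1.0)
    >>> labels = spectral_clustering(affinity, n_clusters=2, random_state=0)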
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster, for instance when clusters are
    nested circles in the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
        Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
labels_ :
Labels of each point
Notes
-----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements and high values mean
    very dissimilar elements, it can be transformed into a
    similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
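    Examples
    --------
    A small, illustrative sketch (the toy data below is an arbitrary choice,
    not a recommendation):
    >>> import numpy as np
    >>> from sklearn.cluster import SpectralClustering
    >>> X = np.vstack([np.random.randn(20, 2), np.random.randn(20, 2) + 5])
    >>> model = SpectralClustering(n_clusters=2, affinity='rbf',
    ...                            random_state=0)
    >>> labels = model.fit_predict(X)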
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
| bsd-3-clause |
loli/sklearn-ensembletrees | examples/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
| bsd-3-clause |
cwu2011/scikit-learn | examples/svm/plot_custom_kernel.py | 115 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(x, y):
"""
We create a custom kernel:
(2 0)
k(x, y) = x ( ) y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(x, M), y.T)
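# A small, illustrative sanity check (not part of the original example): for a
# single point x = [1, 1], the kernel value x M x.T should be 2*1 + 1*1 = 3.
_x_check = np.array([[1.0, 1.0]])
assert np.allclose(my_kernel(_x_check, _x_check), [[3.0]])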
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
EderSantana/agnez | agnez/video.py | 2 | 6717 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from functools import wraps
from .embedding import _prepare_fig_labels
from .grid import img_grid, grid2d
def image_sequence(X, shape):
    '''Image Sequence converts a matrix with different examples in each
    row to a sequence of resized images
    Parameters
    ----------
X: 2D `numpy.array`
Matrix with flatten examples on each row
shape: list
list with the shape to resize the flatten elements in X
convention is (channels, rows, columns)
'''
X = X.reshape((-1,)+shape)
if len(shape) == 3:
X = X.transpose(2, 0, 3, 1)
X = X.reshape(X.shape[0], X.shape[1]*X.shape[2], X.shape[3])
else:
X = X.swapaxes(0, 1)
X = X.reshape((X.shape[0], X.shape[1]*X.shape[2]))
return X
def _prepare_axis(subplot, data=None):
ax = plt.subplot(subplot, aspect='equal')
if data is not None:
xymin = data.min()-data.std()/3
xymax = data.max()+data.std()/3
plt.xlim(xymin, xymax)
plt.ylim(xymin, xymax)
ax.axis('off')
return ax
def animate(func):
@wraps(func)
def wrapper(*args, **kwargs):
make_frame, fig, fargs, video_length, filepath = func(*args, **kwargs)
ani = animation.FuncAnimation(fig, make_frame, frames=video_length,
interval=100, fargs=fargs)
ani.save(filepath, writer='imagemagick', fps=5)
return ani
return wrapper
@animate
def make_gif(video, filepath='video.gif', gray=True, interpolation=None):
'''Transform a sequence of images into a gif
Parameters
----------
video: 4D `numpy.array`
array with image sequences with dimensions (frames, row, col, channels)
filepath: str
path to save the animation
    interpolation: str, optional
        interpolation method passed on to matplotlib's imshow
gray: bool
gray scale?
'''
fig = plt.figure()
ax1 = _prepare_axis(111)
t = video.shape[0]
    if gray:
        # render single-channel frames with a gray colormap when requested
        vid = ax1.imshow(video[0], cmap='gray', interpolation=interpolation)
    else:
        vid = ax1.imshow(video[0], interpolation=interpolation)
# plt.draw()
def make_frame(t, vid):
v = video[t]
if video.shape[-1] == 1:
v = v[:, :, 0]
vid.set_data(v)
return vid
return make_frame, fig, (vid,), t, filepath
@animate
def timeseries2dvideo(data, labels, filepath='ts2video.gif'):
'''2d scatter plot video of times series embedding
Parameters
----------
data: `numpy.array`
numpy array with dimensions (time, samples, 2)
labels: `numpy.array`
numpy vector with the label of each sample in data. `labels`
must have the same number of elements as the second dimension
of data
filepath: str
path to save the animation
'''
labels, palette, fig = _prepare_fig_labels(data, labels)
ax = _prepare_axis(111, data)
t, b, d = data.shape
data = data.transpose(1, 0, 2).reshape((t*b, d))
sc = ax.scatter([], [])
def make_frame(t, sc):
pts = data[t]
color = np.hstack([palette[labels[t].astype(np.int)], 1.])
offsets = np.vstack([sc.get_offsets(), pts])
sc.set_offsets(offsets)
colors = np.vstack([sc.get_facecolors(), color])
sc.set_facecolors(colors)
return make_frame, fig, (sc,), data.shape[0], filepath
@animate
def video_embedding(video, embedding, labels, filepath='video_ebd.gif'):
'''2D scatter plot video of times series embedding along side
its original image sequence.
Parameters
----------
video: 3D `numpy.array`
array with image sequences with dimensions (frames, samples, dim)
embedding: 3D `numpy.array`
2D embedding of each video with dimensions (frames, samples, 2)
labels: `numpy.array`
numpy vector with the label of each sample in data. `labels`
must have the same number of elements as the second dimension
of data
filepath: str
path to save the animation
'''
labels, palette, fig = _prepare_fig_labels(embedding, labels)
ax2 = _prepare_axis(121, embedding)
ax1 = _prepare_axis(122)
sc = ax2.scatter([], [])
t, b, d = embedding.shape
embedding = embedding.transpose(1, 0, 2).reshape((t*b, d))
t, b, d = video.shape
video = video.transpose(1, 0, 2).reshape((t*b, d))
dim = np.sqrt(d).astype('int')
init_frame = video[0].reshape((dim, dim))
vid = ax1.imshow(init_frame, cmap='gist_gray_r', vmin=video.min(),
vmax=video.max())
# plt.draw()
def make_frame(t, sc, vid):
pts = embedding[t]
frame = video[t].reshape((dim, dim))
color = np.hstack([palette[labels[t].astype(np.int)], 1.])
offsets = np.vstack([sc.get_offsets(), pts])
sc.set_offsets(offsets)
colors = np.vstack([sc.get_facecolors(), color])
sc.set_facecolors(colors)
vid.set_data(frame)
return sc, vid
return make_frame, fig, (sc, vid), t*b, filepath
@animate
def video_img_grid(video, filepath='video_grid.gif', rescale=False):
'''2D video grid for parallel visualization
based on agnez.img_grid
Parameters
----------
video: 5D `numpy.array`
array with image sequences with dimensions (samples, frames, channels,
height, width)
filepath: str
path to save the animation
rescale: bool
flag to rescale displayed images by grid2d
'''
fig = plt.figure(figsize=(20, 20))
ax1 = _prepare_axis(111)
t = video.shape[1]
grid = img_grid(video[:, 0])
vid = ax1.imshow(grid, cmap='gray')
# plt.draw()
def make_frame(t, vid):
grid = img_grid(video[:, t], rescale=rescale)
vid.set_data(grid)
return vid
return make_frame, fig, (vid,), t, filepath
@animate
def video_grid2d(video, filepath='video_grid.gif', rescale=False):
'''2D video grid for parallel visualization
based on agnez.grid2d
Parameters
----------
video: 3D `numpy.array`
array with image sequences with dimensions (frames, samples, dim)
filepath: str
path to save the animation
rescale: bool
flag to rescale displayed images by grid2d
'''
fig = plt.figure()
ax1 = _prepare_axis(111)
t, b, d = video.shape
grid = grid2d(video[0])
vid = ax1.imshow(grid, cmap='gray')
# plt.draw()
def make_frame(t, vid):
grid = grid2d(video[t], rescale=rescale)
vid.set_data(grid)
return vid
return make_frame, fig, (vid,), t, filepath
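# A hedged usage sketch (not part of the original module): build a short random
# gray-scale clip and write it out with make_gif. Saving the gif relies on the
# imagemagick writer being available, so this only runs as a script.
if __name__ == '__main__':
    demo_clip = np.random.rand(10, 16, 16)  # (frames, rows, cols), made-up data
    make_gif(demo_clip, filepath='demo.gif', gray=True)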
| bsd-3-clause |
PythonCharmers/bokeh | bokeh/cli/core.py | 42 | 16025 | from __future__ import absolute_import, print_function
import sys, os
from six.moves.urllib import request as urllib2
from six.moves import cStringIO as StringIO
import pandas as pd
try:
import click
is_click = True
except ImportError:
is_click = False
from . import help_messages as hm
from .utils import (get_chart_params, get_charts_mapping,
get_data_series, keep_source_input_sync, get_data_from_url)
from .. import charts as bc
from ..charts import utils as bc_utils
from bokeh.models.widgets import Button
# Define a mapping to connect chart types supported arguments and chart classes
CHARTS_MAP = get_charts_mapping()
if is_click:
@click.command()
@click.option('--input', 'input_source', default=None,help=hm.HELP_INPUT)
@click.option('--output', default='file://cli_output.html', help=hm.HELP_OUTPUT)
@click.option('--title', default='Bokeh CLI')
@click.option('--chart_type', default='Line')
@click.option('--index', default='', help=hm.HELP_INDEX)
@click.option('--series', default='', help=hm.HELP_SERIES)
@click.option('--palette')
@click.option('--buffer', default='f', help=hm.HELP_BUFFER)
@click.option('--sync_with_source', default=False)
@click.option('--update_ranges', 'update_ranges', flag_value=True,
default=False)
@click.option('--legend', 'show_legend', flag_value=True,
default=False)
@click.option('--window_size', default='0', help=hm.HELP_WIN_SIZE)
@click.option('--map', 'map_', default=None)
@click.option('--map_zoom', 'map_zoom', default=12)
@click.option('--map_layer', 'map_layer', default="hybrid")
@click.option('--smart_filters', 'smart_filters', flag_value=True,
default=False)
def cli(input_source, output, title, chart_type, series, palette, index,
buffer, sync_with_source, update_ranges, show_legend, window_size,
map_, smart_filters, map_zoom, map_layer):
"""Bokeh Command Line Tool is a minimal client to access high level plotting
functionality provided by bokeh.charts API.
Examples:
>> python bokeh-cli.py --title "My Nice Plot" --series "High,Low,Close"
--chart_type "Line" --palette Reds --input sample_data/stocks_data.csv
>> cat sample_data/stocks_data.csv | python bokeh-cli.py --buffer t
>> python bokeh-cli.py --help
"""
cli = CLI(
input_source, output, title, chart_type, series, palette, index, buffer,
sync_with_source, update_ranges, show_legend, window_size, map_,
smart_filters, map_zoom, map_layer
)
cli.run()
else:
def cli():
print("The CLI tool requires click to be installed")
class CLI(object):
"""This is the Bokeh Command Line Interface class and it is in
charge of providing a very high level access to bokeh charts and
extends it with functionality.
"""
def __init__(self, input_source, output, title, chart_type, series, palette,
index, buffer, sync_with_source, update_ranges, show_legend,
window_size, map_, smart_filters, map_zoom, map_layer):
"""Args:
input_source (str): path to the series data file (i.e.:
/source/to/my/data.csv)
NOTE: this can be either a path to a local file or an url
output (str, optional): Selects the plotting output, which
could either be sent to an html file or a bokeh server
instance. Syntax convention for this option is as follows:
<output_type>://<type_arg>
where:
- output_type: 'file' or 'server'
- 'file' type options: path_to_output_file
- 'server' type options syntax: docname[@url][@name]
Defaults to: --output file://cli_output.html
Examples:
--output file://cli_output.html
--output file:///home/someuser/bokeh_rocks/cli_output.html
--output server://clidemo
Default: file://cli_output.html.
title (str, optional): the title of your chart.
Default: None.
chart_type (str, optional): charts classes to use to consume and
render the input data.
Default: Line.
series (str, optional): Name of the series from the input source
to include in the plot. If not specified all source series
will be included.
Defaults to None.
palette (str, optional): name of the colors palette to use.
Default: None.
index (str, optional): Name of the data series to be used as the
index when plotting. By default the first series found on the
input file is taken
Default: None
buffer (str, optional): if is `t` reads data source as string from
input buffer using StringIO(sys.stdin.read()) instead of
reading from a file or an url.
Default: "f"
sync_with_source (bool, optional): if True keep the charts source
created on bokeh-server sync'ed with the source acting like
`tail -f`.
Default: False
window_size (int, optional): show up to N values then start dropping
off older ones
Default: '0'
Attributes:
source (obj): datasource object for the created chart.
chart (obj): created chart object.
"""
self.input = input_source
self.series = series
self.index = index
self.last_byte = -1
self.sync_with_source = sync_with_source
self.update_ranges = update_ranges
self.show_legend = show_legend
self.window_size = int(window_size)
self.smart_filters = smart_filters
self.map_options = {}
self.current_selection = []
self.source = self.get_input(input_source, buffer)
# get the charts specified by the user
self.factories = create_chart_factories(chart_type)
if palette:
print ("Sorry, custom palettes not supported yet, coming soon!")
# define charts init parameters specified from cmd line and create chart
self.chart_args = get_chart_params(
title, output, show_legend=self.show_legend
)
if self.smart_filters:
self.chart_args['tools'] = "pan,wheel_zoom,box_zoom,reset,save," \
"box_select,lasso_select"
if map_:
self.map_options['lat'], self.map_options['lng'] = \
[float(x) for x in map_.strip().split(',')]
self.map_options['zoom'] = int(map_zoom)
# Yeah, unfortunate namings.. :-)
self.map_options['map_type'] = map_layer
def on_selection_changed(self, obj, attrname, old, new):
self.current_selection = new
def limit_source(self, source):
""" Limit source to cli.window_size, if set.
Args:
source (mapping): dict-like object
"""
if self.window_size:
for key in source.keys():
source[key] = source[key][-self.window_size:]
def run(self):
""" Start the CLI logic creating the input source, data conversions,
chart instances to show and all other niceties provided by CLI
"""
try:
self.limit_source(self.source)
children = []
if self.smart_filters:
copy_selection = Button(label="copy current selection")
copy_selection.on_click(self.on_copy)
children.append(copy_selection)
self.chart = create_chart(
self.series, self.source, self.index, self.factories,
self.map_options, children=children, **self.chart_args
)
self.chart.show()
self.has_ranged_x_axis = 'ranged_x_axis' in self.source.columns
self.columns = [c for c in self.source.columns if c != 'ranged_x_axis']
if self.smart_filters:
for chart in self.chart.charts:
chart.source.on_change('selected', self, 'on_selection_changed')
self.chart.session.poll_document(self.chart.doc)
except TypeError:
if not self.series:
series_list = ', '.join(self.chart.values.keys())
print(hm.ERR_MSG_TEMPL % series_list)
raise
if self.sync_with_source:
keep_source_input_sync(self.input, self.update_source, self.last_byte)
def on_copy(self, *args, **kws):
print("COPYING CONTENT!")
# TODO: EXPERIMENTAL!!! THIS EXPOSE MANY SECURITY ISSUES AND SHOULD
# BE REMOVED ASAP!
txt = ''
for rowind in self.current_selection:
row = self.source.iloc[rowind]
txt += u"%s\n" % (u",".join(str(row[c]) for c in self.columns))
os.system("echo '%s' | pbcopy" % txt)
def update_source(self, new_source):
""" Update self.chart source with the new data retrieved from
        new_source. It is done by parsing the new source line,
        transforming it into data to be appended to self.chart source,
        updating it on chart.session and actually updating chart.session
        objects.
Args:
new_source (str): string that contains the new source row to
read to the current chart source.
"""
ns = pd.read_csv(StringIO(new_source), names=self.columns)
len_source = len(self.source)
if self.has_ranged_x_axis:
ns['ranged_x_axis'] = [len_source]
self.index = 'ranged_x_axis'
ns.index = [len_source]
self.source = pd.concat([self.source, ns])
# TODO: This should be replaced with something that just computes
# the new data and source
fig = create_chart(self.series, ns, self.index, self.factories,
self.map_options, **self.chart_args)
for i, _c in enumerate(fig.charts):
if not isinstance(_c, bc.GMap):
# TODO: nested charts are getting ridiculous. Need a better
# better interface for charts :-)
scc = self.chart.charts[i]
for k, v in _c.source.data.items():
scc.source.data[k] = list(scc.source.data[k]) + list(v)
self.limit_source(scc.source.data)
chart = scc.chart
chart.session.store_objects(scc.source)
if self.update_ranges:
plot = chart.plot
plot.y_range.start = min(
plot.y_range.start, _c.chart.plot.y_range.start
)
plot.y_range.end = max(
plot.y_range.end, _c.chart.plot.y_range.end
)
plot.x_range.start = min(
plot.x_range.start, _c.chart.plot.x_range.start
)
plot.x_range.end = max(
plot.x_range.end, _c.chart.plot.x_range.end
)
chart.session.store_objects(plot)
def get_input(self, filepath, buffer):
"""Parse received input options. If buffer is not false (=='f') if
gets input data from input buffer othewise opens file specified in
sourcefilename,
Args:
filepath (str): path to the file to read from to retrieve data
buffer (str): if == 't' reads data from input buffer
Returns:
string read from filepath/buffer
"""
if buffer != 'f':
filepath = StringIO(sys.stdin.read())
elif filepath is None:
msg = "No Input! Please specify --source_filename or --buffer t"
raise IOError(msg)
else:
if filepath.lower().startswith('http'):
# Create a request for the given URL.
request = urllib2.Request(filepath)
data = get_data_from_url(request)
self.last_byte = len(data)
else:
filepath = open(filepath, 'r').read()
self.last_byte = len(filepath)
filepath = StringIO(filepath)
source = pd.read_csv(filepath)
return source
def create_chart(series, source, index, factories, map_options=None, children=None, **args):
"""Create charts instances from types specified in factories using
data series names, source, index and args
Args:
series (list(str)): list of strings specifying the names of the
series to keep from source
source (DataFrame): pandas DataFrame with the data series to be
plotted
index (str): name of the series of source to be used as index.
factories (list(ChartObject)): list of chart classes to be used
to create the charts to be plotted
**args: arguments to pass to the charts when creating them.
"""
if not index:
# if no index was specified as for x axis
# we take a default "range"
index = 'ranged_x_axis'
# add the new x range data to the source dataframe
source[index] = range(len(source[source.columns[0]]))
indexes = [x for x in index.split(',') if x]
data_series = get_data_series(series, source, indexes)
# parse queries to create the charts..
charts = []
for chart_type in factories:
if chart_type == bc.GMap:
if not map_options or \
not all([x in map_options for x in ['lat', 'lng']]):
raise ValueError("GMap Charts need lat and lon coordinates!")
all_args = dict(map_options)
all_args.update(args)
chart = chart_type(**all_args)
else:
if chart_type == bc.TimeSeries:
# in case the x axis type is datetime that column must be converted to
# datetime
data_series[index] = pd.to_datetime(source[index])
elif chart_type == bc.Scatter:
if len(indexes) == 1:
scatter_ind = [x for x in data_series.pop(indexes[0]).values]
scatter_ind = [scatter_ind] * len(data_series)
else:
scatter_ind = []
for key in indexes:
scatter_ind.append([x for x in data_series.pop(key).values])
if len(scatter_ind) != len(data_series):
err_msg = "Number of multiple indexes must be equals" \
" to the number of series"
raise ValueError(err_msg)
for ind, key in enumerate(data_series):
values = data_series[key].values
data_series[key] = zip(scatter_ind[ind], values)
chart = chart_type(data_series, **args)
if hasattr(chart, 'index'):
chart.index = index
charts.append(chart)
fig = bc_utils.Figure(*charts, children=children, **args)
return fig
def create_chart_factories(chart_types):
"""Receive the chart type(s) specified by the user and build a
list of the their related functions.
Args:
series (str): string that contains the name of the
chart classes to use when creating the chart, separated by `,`
example:
>> create_chart_factories('Line,step')
[Line, Step]
"""
return [get_chart(name) for name in chart_types.split(',') if name]
def get_chart(class_name):
"""Return the bokeh class specified in class_name.
Args:
class_name (str): name of the chart class to return (i.e.: Line|step)
"""
return CHARTS_MAP[class_name.strip().lower()]
if __name__ == '__main__':
cli()
| bsd-3-clause |
yordan-desta/QgisIns | python/plugins/processing/algs/qgis/RasterLayerHistogram.py | 2 | 3184 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RasterLayerHistogram.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from PyQt4.QtCore import *
from qgis.core import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterRaster
from processing.core.outputs import OutputTable
from processing.core.outputs import OutputHTML
from processing.tools import dataobjects
from processing.tools import raster
class RasterLayerHistogram(GeoAlgorithm):
INPUT = 'INPUT'
PLOT = 'PLOT'
TABLE = 'TABLE'
BINS = 'BINS'
def processAlgorithm(self, progress):
uri = self.getParameterValue(self.INPUT)
layer = dataobjects.getObjectFromUri(uri)
outputplot = self.getOutputValue(self.PLOT)
outputtable = self.getOutputFromName(self.TABLE)
values = raster.scanraster(layer, progress)
nbins = self.getParameterValue(self.BINS)
# ALERT: this is potentially blocking if the layer is too big
plt.close()
valueslist = []
for v in values:
if v is not None:
valueslist.append(v)
(n, bins, values) = plt.hist(valueslist, nbins)
fields = [QgsField('CENTER_VALUE', QVariant.Double),
QgsField('NUM_ELEM', QVariant.Double)]
writer = outputtable.getTableWriter(fields)
for i in xrange(len(values)):
writer.addRecord([str(bins[i]) + '-' + str(bins[i + 1]), n[i]])
plotFilename = outputplot + '.png'
lab.savefig(plotFilename)
f = open(outputplot, 'w')
f.write('<img src="' + plotFilename + '"/>')
f.close()
def defineCharacteristics(self):
self.name = 'Raster layer histogram'
self.group = 'Graphics'
self.addParameter(ParameterRaster(self.INPUT, 'Input layer'))
self.addParameter(ParameterNumber(self.BINS, 'Number of bins', 2,
None, 10))
self.addOutput(OutputHTML(self.PLOT, 'Output plot'))
self.addOutput(OutputTable(self.TABLE, 'Output table'))
| gpl-2.0 |
yl565/statsmodels | statsmodels/graphics/regressionplots.py | 3 | 39486 | '''Partial Regression plot and residual plots to find misspecification
Author: Josef Perktold
License: BSD-3
Created: 2011-01-23
update
2011-06-05 : start to convert example to usable functions
2011-10-27 : docstrings
'''
from statsmodels.compat.python import lrange, string_types, lzip, range
import numpy as np
import pandas as pd
from patsy import dmatrix
from statsmodels.regression.linear_model import OLS, GLS, WLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.generalized_estimating_equations import GEE
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.graphics import utils
from statsmodels.nonparametric.smoothers_lowess import lowess
from statsmodels.tools.tools import maybe_unwrap_results
from statsmodels.base import model
from ._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
__all__ = ['plot_fit', 'plot_regress_exog', 'plot_partregress', 'plot_ccpr',
'plot_regress_exog', 'plot_partregress_grid', 'plot_ccpr_grid',
'add_lowess', 'abline_plot', 'influence_plot',
'plot_leverage_resid2', 'added_variable_resids',
'partial_resids', 'ceres_resids', 'plot_added_variable',
'plot_partial_residuals', 'plot_ceres_residuals']
#TODO: consider moving to influence module
def _high_leverage(results):
#TODO: replace 1 with k_constant
return 2. * (results.df_model + 1)/results.nobs
def add_lowess(ax, lines_idx=0, frac=.2, **lowess_kwargs):
"""
Add Lowess line to a plot.
Parameters
----------
ax : matplotlib Axes instance
The Axes to which to add the plot
lines_idx : int
This is the line on the existing plot to which you want to add
a smoothed lowess line.
frac : float
The fraction of the points to use when doing the lowess fit.
lowess_kwargs
        Additional keyword arguments are passed to lowess.
Returns
-------
fig : matplotlib Figure instance
The figure that holds the instance.
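    Examples
    --------
    An illustrative sketch (the dataset and the smoothing fraction are
    arbitrary choices):
    >>> import statsmodels.api as sm
    >>> from statsmodels.graphics.regressionplots import plot_ccpr, add_lowess
    >>> data = sm.datasets.statecrime.load_pandas().data
    >>> res = sm.OLS(data['murder'], sm.add_constant(data['poverty'])).fit()
    >>> fig = plot_ccpr(res, 'poverty')
    >>> fig = add_lowess(fig.axes[0], frac=0.5)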
"""
y0 = ax.get_lines()[lines_idx]._y
x0 = ax.get_lines()[lines_idx]._x
lres = lowess(y0, x0, frac=frac, **lowess_kwargs)
ax.plot(lres[:, 0], lres[:, 1], 'r', lw=1.5)
return ax.figure
def plot_fit(results, exog_idx, y_true=None, ax=None, **kwargs):
"""Plot fit against one regressor.
This creates one graph with the scatterplot of observed values compared to
fitted values.
Parameters
----------
results : result instance
result instance with resid, model.endog and model.exog as attributes
    exog_idx : int or str
Name or index of regressor in exog matrix.
y_true : array_like
(optional) If this is not None, then the array is added to the plot
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
kwargs
The keyword arguments are passed to the plot command for the fitted
values points.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Examples
--------
Load the Statewide Crime data set and perform linear regression with
`poverty` and `hs_grad` as variables and `murder` as the response
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> data = sm.datasets.statecrime.load_pandas().data
>>> murder = data['murder']
>>> X = data[['poverty', 'hs_grad']]
>>> X["constant"] = 1
>>> y = murder
>>> model = sm.OLS(y, X)
>>> results = model.fit()
Create a plot just for the variable 'Poverty':
>>> fig, ax = plt.subplots()
>>> fig = sm.graphics.plot_fit(results, 0, ax=ax)
>>> ax.set_ylabel("Murder Rate")
>>> ax.set_xlabel("Poverty Level")
>>> ax.set_title("Linear Regression")
>>> plt.show()
.. plot:: plots/graphics_plot_fit_ex.py
"""
fig, ax = utils.create_mpl_ax(ax)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
results = maybe_unwrap_results(results)
#maybe add option for wendog, wexog
y = results.model.endog
x1 = results.model.exog[:, exog_idx]
x1_argsort = np.argsort(x1)
y = y[x1_argsort]
x1 = x1[x1_argsort]
ax.plot(x1, y, 'bo', label=results.model.endog_names)
    if y_true is not None:
ax.plot(x1, y_true[x1_argsort], 'b-', label='True values')
title = 'Fitted values versus %s' % exog_name
prstd, iv_l, iv_u = wls_prediction_std(results)
ax.plot(x1, results.fittedvalues[x1_argsort], 'D', color='r',
label='fitted', **kwargs)
ax.vlines(x1, iv_l[x1_argsort], iv_u[x1_argsort], linewidth=1, color='k',
alpha=.7)
#ax.fill_between(x1, iv_l[x1_argsort], iv_u[x1_argsort], alpha=0.1,
# color='k')
ax.set_title(title)
ax.set_xlabel(exog_name)
ax.set_ylabel(results.model.endog_names)
ax.legend(loc='best', numpoints=1)
return fig
def plot_regress_exog(results, exog_idx, fig=None):
"""Plot regression results against one regressor.
This plots four graphs in a 2 by 2 figure: 'endog versus exog',
'residuals versus exog', 'fitted versus exog' and
'fitted plus residual versus exog'
Parameters
----------
results : result instance
result instance with resid, model.endog and model.exog as attributes
exog_idx : int
index of regressor in exog matrix
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : matplotlib figure instance
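    Examples
    --------
    A minimal, illustrative sketch (the regressors chosen below are arbitrary):
    >>> import statsmodels.api as sm
    >>> import matplotlib.pyplot as plt
    >>> data = sm.datasets.statecrime.load_pandas().data
    >>> res = sm.OLS(data['murder'],
    ...              sm.add_constant(data[['poverty', 'hs_grad']])).fit()
    >>> fig = sm.graphics.plot_regress_exog(res, 'poverty')
    >>> plt.show()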
"""
fig = utils.create_mpl_fig(fig)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
results = maybe_unwrap_results(results)
#maybe add option for wendog, wexog
y_name = results.model.endog_names
x1 = results.model.exog[:, exog_idx]
prstd, iv_l, iv_u = wls_prediction_std(results)
ax = fig.add_subplot(2, 2, 1)
ax.plot(x1, results.model.endog, 'o', color='b', alpha=0.9, label=y_name)
ax.plot(x1, results.fittedvalues, 'D', color='r', label='fitted',
alpha=.5)
ax.vlines(x1, iv_l, iv_u, linewidth=1, color='k', alpha=.7)
ax.set_title('Y and Fitted vs. X', fontsize='large')
ax.set_xlabel(exog_name)
ax.set_ylabel(y_name)
ax.legend(loc='best')
ax = fig.add_subplot(2, 2, 2)
ax.plot(x1, results.resid, 'o')
ax.axhline(y=0, color='black')
ax.set_title('Residuals versus %s' % exog_name, fontsize='large')
ax.set_xlabel(exog_name)
ax.set_ylabel("resid")
ax = fig.add_subplot(2, 2, 3)
exog_noti = np.ones(results.model.exog.shape[1], bool)
exog_noti[exog_idx] = False
exog_others = results.model.exog[:, exog_noti]
from pandas import Series
fig = plot_partregress(results.model.data.orig_endog,
Series(x1, name=exog_name,
index=results.model.data.row_labels),
exog_others, obs_labels=False, ax=ax)
ax.set_title('Partial regression plot', fontsize='large')
#ax.set_ylabel("Fitted values")
#ax.set_xlabel(exog_name)
ax = fig.add_subplot(2, 2, 4)
fig = plot_ccpr(results, exog_idx, ax=ax)
ax.set_title('CCPR Plot', fontsize='large')
#ax.set_xlabel(exog_name)
#ax.set_ylabel("Fitted values + resids")
fig.suptitle('Regression Plots for %s' % exog_name, fontsize="large")
fig.tight_layout()
fig.subplots_adjust(top=.90)
return fig
def _partial_regression(endog, exog_i, exog_others):
"""Partial regression.
regress endog on exog_i conditional on exog_others
uses OLS
Parameters
----------
endog : array_like
exog : array_like
exog_others : array_like
Returns
-------
res1c : OLS results instance
(res1a, res1b) : tuple of OLS results instances
results from regression of endog on exog_others and of exog_i on
exog_others
"""
#FIXME: This function doesn't appear to be used.
res1a = OLS(endog, exog_others).fit()
res1b = OLS(exog_i, exog_others).fit()
res1c = OLS(res1a.resid, res1b.resid).fit()
return res1c, (res1a, res1b)
def plot_partregress(endog, exog_i, exog_others, data=None,
title_kwargs={}, obs_labels=True, label_kwargs={},
ax=None, ret_coords=False, **kwargs):
"""Plot partial regression for a single regressor.
Parameters
----------
endog : ndarray or string
endogenous or response variable. If string is given, you can use a
arbitrary translations as with a formula.
exog_i : ndarray or string
exogenous, explanatory variable. If string is given, you can use a
arbitrary translations as with a formula.
exog_others : ndarray or list of strings
other exogenous, explanatory variables. If a list of strings is given,
        each item is a term in formula. You can use arbitrary translations
as with a formula. The effect of these variables will be removed by
OLS regression.
data : DataFrame, dict, or recarray
Some kind of data structure with names if the other variables are
given as strings.
title_kwargs : dict
Keyword arguments to pass on for the title. The key to control the
fonts is fontdict.
obs_labels : bool or array-like
Whether or not to annotate the plot points with their observation
labels. If obs_labels is a boolean, the point labels will try to do
the right thing. First it will try to use the index of data, then
fall back to the index of exog_i. Alternatively, you may give an
        array-like object corresponding to the observation numbers.
    label_kwargs : dict
Keyword arguments that control annotate for the observation labels.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
ret_coords : bool
If True will return the coordinates of the points in the plot. You
can use this to add your own annotations.
kwargs
The keyword arguments passed to plot for the points.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
coords : list, optional
If ret_coords is True, return a tuple of arrays (x_coords, y_coords).
Notes
-----
    The slope of the fitted line is that of `exog_i` in the full
multiple regression. The individual points can be used to assess the
influence of points on the estimated coefficient.
See Also
--------
plot_partregress_grid : Plot partial regression for a set of regressors.
"""
#NOTE: there is no interaction between possible missing data and
#obs_labels yet, so this will need to be tweaked a bit for this case
fig, ax = utils.create_mpl_ax(ax)
# strings, use patsy to transform to data
if isinstance(endog, string_types):
endog = dmatrix(endog + "-1", data)
if isinstance(exog_others, string_types):
RHS = dmatrix(exog_others, data)
elif isinstance(exog_others, list):
RHS = "+".join(exog_others)
RHS = dmatrix(RHS, data)
else:
RHS = exog_others
    RHS_isempty = False
    if isinstance(RHS, np.ndarray) and RHS.size == 0:
        RHS_isempty = True
    elif isinstance(RHS, pd.DataFrame) and RHS.empty:
        RHS_isempty = True
    if isinstance(exog_i, string_types):
        exog_i = dmatrix(exog_i + "-1", data)
    # all arrays or pandas-like
    if RHS_isempty:
ax.plot(endog, exog_i, 'o', **kwargs)
fitted_line = OLS(endog, exog_i).fit()
x_axis_endog_name = 'x' if isinstance(exog_i, np.ndarray) else exog_i.name
y_axis_endog_name = 'y' if isinstance(endog, np.ndarray) else endog.design_info.column_names[0]
else:
res_yaxis = OLS(endog, RHS).fit()
res_xaxis = OLS(exog_i, RHS).fit()
xaxis_resid = res_xaxis.resid
yaxis_resid = res_yaxis.resid
x_axis_endog_name = res_xaxis.model.endog_names
y_axis_endog_name = res_yaxis.model.endog_names
ax.plot(xaxis_resid, yaxis_resid, 'o', **kwargs)
fitted_line = OLS(yaxis_resid, xaxis_resid).fit()
fig = abline_plot(0, fitted_line.params[0], color='k', ax=ax)
if x_axis_endog_name == 'y': # for no names regression will just get a y
x_axis_endog_name = 'x' # this is misleading, so use x
ax.set_xlabel("e(%s | X)" % x_axis_endog_name)
ax.set_ylabel("e(%s | X)" % y_axis_endog_name)
ax.set_title('Partial Regression Plot', **title_kwargs)
#NOTE: if we want to get super fancy, we could annotate if a point is
#clicked using this widget
#http://stackoverflow.com/questions/4652439/
#is-there-a-matplotlib-equivalent-of-matlabs-datacursormode/
#4674445#4674445
if obs_labels is True:
if data is not None:
obs_labels = data.index
elif hasattr(exog_i, "index"):
obs_labels = exog_i.index
else:
obs_labels = res_xaxis.model.data.row_labels
#NOTE: row_labels can be None.
#Maybe we should fix this to never be the case.
if obs_labels is None:
obs_labels = lrange(len(exog_i))
if obs_labels is not False: # could be array-like
if len(obs_labels) != len(exog_i):
raise ValueError("obs_labels does not match length of exog_i")
label_kwargs.update(dict(ha="center", va="bottom"))
ax = utils.annotate_axes(lrange(len(obs_labels)), obs_labels,
lzip(res_xaxis.resid, res_yaxis.resid),
[(0, 5)] * len(obs_labels), "x-large", ax=ax,
**label_kwargs)
if ret_coords:
return fig, (res_xaxis.resid, res_yaxis.resid)
else:
return fig
def plot_partregress_grid(results, exog_idx=None, grid=None, fig=None):
"""Plot partial regression for a set of regressors.
Parameters
----------
results : results instance
A regression model results instance
exog_idx : None, list of ints, list of strings
(column) indices of the exog used in the plot, default is all.
grid : None or tuple of int (nrows, ncols)
If grid is given, then it is used for the arrangement of the subplots.
If grid is None, then ncol is one, if there are only 2 subplots, and
the number of columns is two otherwise.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `fig` is None, the created figure. Otherwise `fig` itself.
Notes
-----
A subplot is created for each explanatory variable given by exog_idx.
The partial regression plot shows the relationship between the response
and the given explanatory variable after removing the effect of all other
explanatory variables in exog.
See Also
--------
plot_partregress : Plot partial regression for a single regressor.
plot_ccpr
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/partregr.htm
"""
import pandas
fig = utils.create_mpl_fig(fig)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
#maybe add option for using wendog, wexog instead
y = pandas.Series(results.model.endog, name=results.model.endog_names)
exog = results.model.exog
k_vars = exog.shape[1]
#this function doesn't make sense if k_vars=1
    if grid is not None:
nrows, ncols = grid
else:
if len(exog_idx) > 2:
nrows = int(np.ceil(len(exog_idx)/2.))
ncols = 2
title_kwargs = {"fontdict" : {"fontsize" : 'small'}}
else:
nrows = len(exog_idx)
ncols = 1
title_kwargs = {}
# for indexing purposes
other_names = np.array(results.model.exog_names)
for i, idx in enumerate(exog_idx):
others = lrange(k_vars)
others.pop(idx)
exog_others = pandas.DataFrame(exog[:, others],
columns=other_names[others])
ax = fig.add_subplot(nrows, ncols, i+1)
plot_partregress(y, pandas.Series(exog[:, idx],
name=other_names[idx]),
exog_others, ax=ax, title_kwargs=title_kwargs,
obs_labels=False)
ax.set_title("")
fig.suptitle("Partial Regression Plot", fontsize="large")
fig.tight_layout()
fig.subplots_adjust(top=.95)
return fig
def plot_ccpr(results, exog_idx, ax=None):
"""Plot CCPR against one regressor.
Generates a CCPR (component and component-plus-residual) plot.
Parameters
----------
results : result instance
A regression results instance.
exog_idx : int or string
Exogenous, explanatory variable. If string is given, it should
be the variable name that you want to use, and you can use arbitrary
translations as with a formula.
ax : Matplotlib AxesSubplot instance, optional
If given, it is used to plot in instead of a new figure being
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
plot_ccpr_grid : Creates CCPR plot for multiple regressors in a plot grid.
Notes
-----
The CCPR plot provides a way to judge the effect of one regressor on the
response variable by taking into account the effects of the other
independent variables. The partial residuals plot is defined as
Residuals + B_i*X_i versus X_i. The component adds the B_i*X_i versus
X_i to show where the fitted line would lie. Care should be taken if X_i
is highly correlated with any of the other independent variables. If this
is the case, the variance evident in the plot will be an underestimate of
the true variance.
References
----------
http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
"""
fig, ax = utils.create_mpl_ax(ax)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
results = maybe_unwrap_results(results)
x1 = results.model.exog[:, exog_idx]
#namestr = ' for %s' % self.name if self.name else ''
x1beta = x1*results.params[exog_idx]
ax.plot(x1, x1beta + results.resid, 'o')
from statsmodels.tools.tools import add_constant
mod = OLS(x1beta, add_constant(x1)).fit()
params = mod.params
fig = abline_plot(*params, **dict(ax=ax))
#ax.plot(x1, x1beta, '-')
ax.set_title('Component and component plus residual plot')
ax.set_ylabel("Residual + %s*beta_%d" % (exog_name, exog_idx))
ax.set_xlabel("%s" % exog_name)
return fig
def plot_ccpr_grid(results, exog_idx=None, grid=None, fig=None):
"""Generate CCPR plots against a set of regressors, plot in a grid.
Generates a grid of CCPR (component and component-plus-residual) plots.
Parameters
----------
results : result instance
uses exog and params of the result instance
exog_idx : None or list of int
(column) indices of the exog used in the plot
grid : None or tuple of int (nrows, ncols)
If grid is given, then it is used for the arrangement of the subplots.
If grid is None, then ncol is one, if there are only 2 subplots, and
the number of columns is two otherwise.
fig : Matplotlib figure instance, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Notes
-----
Partial residual plots are formed as::
Res + Betahat(i)*Xi versus Xi
and CCPR adds::
Betahat(i)*Xi versus Xi
See Also
--------
plot_ccpr : Creates CCPR plot for a single regressor.
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
"""
fig = utils.create_mpl_fig(fig)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
if grid is not None:
nrows, ncols = grid
else:
if len(exog_idx) > 2:
nrows = int(np.ceil(len(exog_idx)/2.))
ncols = 2
else:
nrows = len(exog_idx)
ncols = 1
seen_constant = 0
for i, idx in enumerate(exog_idx):
if results.model.exog[:, idx].var() == 0:
seen_constant = 1
continue
ax = fig.add_subplot(nrows, ncols, i+1-seen_constant)
fig = plot_ccpr(results, exog_idx=idx, ax=ax)
ax.set_title("")
fig.suptitle("Component-Component Plus Residual Plot", fontsize="large")
fig.tight_layout()
fig.subplots_adjust(top=.95)
return fig
def abline_plot(intercept=None, slope=None, horiz=None, vert=None,
model_results=None, ax=None, **kwargs):
"""
Plots a line given an intercept and slope.
    Parameters
    ----------
    intercept : float
The intercept of the line
slope : float
The slope of the line
horiz : float or array-like
Data for horizontal lines on the y-axis
vert : array-like
        Data for vertical lines on the x-axis
model_results : statsmodels results instance
Any object that has a two-value `params` attribute. Assumed that it
is (intercept, slope)
ax : axes, optional
Matplotlib axes instance
kwargs
        Options passed to matplotlib.pyplot.plot
Returns
-------
fig : Figure
The figure given by `ax.figure` or a new instance.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> np.random.seed(12345)
>>> X = sm.add_constant(np.random.normal(0, 20, size=30))
>>> y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
>>> mod = sm.OLS(y,X).fit()
>>> fig = sm.graphics.abline_plot(model_results=mod)
>>> ax = fig.axes[0]
>>> ax.scatter(X[:,1], y)
>>> ax.margins(.1)
>>> import matplotlib.pyplot as plt
>>> plt.show()
"""
if ax is not None: # get axis limits first thing, don't change these
x = ax.get_xlim()
else:
x = None
fig, ax = utils.create_mpl_ax(ax)
if model_results:
intercept, slope = model_results.params
if x is None:
x = [model_results.model.exog[:, 1].min(),
model_results.model.exog[:, 1].max()]
else:
if not (intercept is not None and slope is not None):
raise ValueError("specify slope and intercepty or model_results")
if x is None:
x = ax.get_xlim()
data_y = [x[0]*slope+intercept, x[1]*slope+intercept]
ax.set_xlim(x)
#ax.set_ylim(y)
from matplotlib.lines import Line2D
class ABLine2D(Line2D):
def update_datalim(self, ax):
ax.set_autoscale_on(False)
children = ax.get_children()
abline = [children[i] for i in range(len(children))
if isinstance(children[i], ABLine2D)][0]
x = ax.get_xlim()
y = [x[0]*slope+intercept, x[1]*slope+intercept]
abline.set_data(x, y)
ax.figure.canvas.draw()
#TODO: how to intercept something like a margins call and adjust?
line = ABLine2D(x, data_y, **kwargs)
ax.add_line(line)
ax.callbacks.connect('xlim_changed', line.update_datalim)
ax.callbacks.connect('ylim_changed', line.update_datalim)
if horiz:
ax.hline(horiz)
if vert:
ax.vline(vert)
return fig
def influence_plot(results, external=True, alpha=.05, criterion="cooks",
size=48, plot_alpha=.75, ax=None, **kwargs):
"""
Plot of influence in regression. Plots studentized resids vs. leverage.
Parameters
----------
results : results instance
A fitted model.
external : bool
Whether to use externally or internally studentized residuals. It is
recommended to leave external as True.
alpha : float
The alpha value to identify large studentized residuals. Large means
abs(resid_studentized) > t.ppf(1-alpha/2, dof=results.df_resid)
criterion : str {'DFFITS', 'Cooks'}
Which criterion to base the size of the points on. Options are
DFFITS or Cook's D.
size : float
The range of `criterion` is mapped to 10**2 - size**2 in points.
plot_alpha : float
The `alpha` of the plotted points.
ax : matplotlib Axes instance
An instance of a matplotlib Axes.
Returns
-------
fig : matplotlib figure
The matplotlib figure that contains the Axes.
Notes
-----
    Row labels are annotated for the observations in which the leverage,
    measured by the diagonal of the hat matrix, is high or the residuals are
    large, since the combination of large residuals and a high influence value
    indicates an influence point. The value of large residuals can be
    controlled using the
`alpha` parameter. Large leverage points are identified as
hat_i > 2 * (df_model + 1)/nobs.
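    Examples
    --------
    An illustrative sketch (the dataset is an arbitrary choice):
    >>> import statsmodels.api as sm
    >>> from statsmodels.graphics.regressionplots import influence_plot
    >>> data = sm.datasets.statecrime.load_pandas().data
    >>> res = sm.OLS(data['murder'],
    ...              sm.add_constant(data[['poverty', 'hs_grad']])).fit()
    >>> fig = influence_plot(res, criterion="cooks")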
"""
fig, ax = utils.create_mpl_ax(ax)
infl = results.get_influence()
if criterion.lower().startswith('coo'):
psize = infl.cooks_distance[0]
elif criterion.lower().startswith('dff'):
psize = np.abs(infl.dffits[0])
else:
raise ValueError("Criterion %s not understood" % criterion)
# scale the variables
#TODO: what is the correct scaling and the assumption here?
#we want plots to be comparable across different plots
#so we would need to use the expected distribution of criterion probably
old_range = np.ptp(psize)
new_range = size**2 - 8**2
psize = (psize - psize.min()) * new_range/old_range + 8**2
leverage = infl.hat_matrix_diag
if external:
resids = infl.resid_studentized_external
else:
resids = infl.resid_studentized_internal
from scipy import stats
cutoff = stats.t.ppf(1.-alpha/2, results.df_resid)
large_resid = np.abs(resids) > cutoff
large_leverage = leverage > _high_leverage(results)
large_points = np.logical_or(large_resid, large_leverage)
ax.scatter(leverage, resids, s=psize, alpha=plot_alpha)
# add point labels
labels = results.model.data.row_labels
if labels is None:
labels = lrange(len(resids))
ax = utils.annotate_axes(np.where(large_points)[0], labels,
lzip(leverage, resids),
lzip(-(psize/2)**.5, (psize/2)**.5), "x-large",
ax)
#TODO: make configurable or let people do it ex-post?
font = {"fontsize" : 16, "color" : "black"}
ax.set_ylabel("Studentized Residuals", **font)
ax.set_xlabel("H Leverage", **font)
ax.set_title("Influence Plot", **font)
return fig
def plot_leverage_resid2(results, alpha=.05, ax=None,
**kwargs):
"""
Plots leverage statistics vs. normalized residuals squared
Parameters
----------
results : results instance
A regression results instance
alpha : float
        Specifies the cut-off for large standardized residuals. Residuals
are assumed to be distributed N(0, 1) with alpha=alpha.
ax : Axes instance
Matplotlib Axes instance
Returns
-------
fig : matplotlib Figure
A matplotlib figure instance.
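    Examples
    --------
    An illustrative sketch (mirroring the other examples in this module; the
    dataset is an arbitrary choice):
    >>> import statsmodels.api as sm
    >>> from statsmodels.graphics.regressionplots import plot_leverage_resid2
    >>> data = sm.datasets.statecrime.load_pandas().data
    >>> res = sm.OLS(data['murder'],
    ...              sm.add_constant(data[['poverty', 'hs_grad']])).fit()
    >>> fig = plot_leverage_resid2(res)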
"""
from scipy.stats import zscore, norm
fig, ax = utils.create_mpl_ax(ax)
infl = results.get_influence()
leverage = infl.hat_matrix_diag
resid = zscore(results.resid)
ax.plot(resid**2, leverage, 'o', **kwargs)
ax.set_xlabel("Normalized residuals**2")
ax.set_ylabel("Leverage")
ax.set_title("Leverage vs. Normalized residuals squared")
large_leverage = leverage > _high_leverage(results)
#norm or t here if standardized?
cutoff = norm.ppf(1.-alpha/2)
large_resid = np.abs(resid) > cutoff
labels = results.model.data.row_labels
if labels is None:
labels = lrange(int(results.nobs))
index = np.where(np.logical_or(large_leverage, large_resid))[0]
ax = utils.annotate_axes(index, labels, lzip(resid**2, leverage),
[(0, 5)]*int(results.nobs), "large",
ax=ax, ha="center", va="bottom")
ax.margins(.075, .075)
return fig
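# Hedged usage note (assumption, not from the original source): with a fitted
# OLS results object such as `ols_results` from the sketch above, the
# companion leverage diagnostic is simply
#
#   fig2 = plot_leverage_resid2(ols_results)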
def plot_added_variable(results, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None, ax=None):
# Docstring attached below
model = results.model
fig, ax = utils.create_mpl_ax(ax)
endog_resid, focus_exog_resid =\
added_variable_resids(results, focus_exog,
resid_type=resid_type,
use_glm_weights=use_glm_weights,
fit_kwargs=fit_kwargs)
ax.plot(focus_exog_resid, endog_resid, 'o', alpha=0.6)
ax.set_title('Added variable plot', fontsize='large')
if type(focus_exog) is str:
xname = focus_exog
else:
xname = model.exog_names[focus_exog]
ax.set_xlabel(xname, size=15)
ax.set_ylabel(model.endog_names + " residuals", size=15)
return fig
plot_added_variable.__doc__ = _plot_added_variable_doc % {
'extra_params_doc' : "results: object\n\tResults for a fitted regression model"}
def plot_partial_residuals(results, focus_exog, ax=None):
# Docstring attached below
model = results.model
focus_exog, focus_col = utils.maybe_name_or_idx(focus_exog, model)
pr = partial_resids(results, focus_exog)
focus_exog_vals = results.model.exog[:, focus_col]
fig, ax = utils.create_mpl_ax(ax)
ax.plot(focus_exog_vals, pr, 'o', alpha=0.6)
ax.set_title('Partial residuals plot', fontsize='large')
if type(focus_exog) is str:
xname = focus_exog
else:
xname = model.exog_names[focus_exog]
ax.set_xlabel(xname, size=15)
ax.set_ylabel("Component plus residual", size=15)
return fig
plot_partial_residuals.__doc__ = _plot_partial_residuals_doc % {
'extra_params_doc' : "results: object\n\tResults for a fitted regression model"}
def plot_ceres_residuals(results, focus_exog, frac=0.66, cond_means=None,
ax=None):
# Docstring attached below
model = results.model
focus_exog, focus_col = utils.maybe_name_or_idx(focus_exog, model)
presid = ceres_resids(results, focus_exog, frac=frac,
cond_means=cond_means)
focus_exog_vals = model.exog[:, focus_col]
fig, ax = utils.create_mpl_ax(ax)
ax.plot(focus_exog_vals, presid, 'o', alpha=0.6)
ax.set_title('CERES residuals plot', fontsize='large')
ax.set_xlabel(focus_exog, size=15)
ax.set_ylabel("Component plus residual", size=15)
return fig
plot_ceres_residuals.__doc__ = _plot_ceres_residuals_doc % {
'extra_params_doc' : "results: object\n\tResults for a fitted regression model"}
def ceres_resids(results, focus_exog, frac=0.66, cond_means=None):
"""
Calculate the CERES residuals (Conditional Expectation Partial
Residuals) for a fitted model.
Parameters
----------
results : model results instance
The fitted model for which the CERES residuals are calculated.
    focus_exog : int or str
        The column index or name of results.model.exog used as the 'focus
        variable'.
frac : float, optional
Lowess smoothing parameter for estimating the conditional
means. Not used if `cond_means` is provided.
cond_means : array-like, optional
If provided, the columns of this array are the conditional
means E[exog | focus exog], where exog ranges over some
or all of the columns of exog other than focus exog. If
this is an empty nx0 array, the conditional means are
treated as being zero. If None, the conditional means are
estimated.
Returns
-------
An array containing the CERES residuals.
Notes
-----
If `cond_means` is not provided, it is obtained by smoothing each
column of exog (except the focus column) against the focus column.
Currently only supports GLM, GEE, and OLS models.
"""
model = results.model
if not isinstance(model, (GLM, GEE, OLS)):
raise ValueError("ceres residuals not available for %s" %
model.__class__.__name__)
focus_exog, focus_col = utils.maybe_name_or_idx(focus_exog, model)
# Indices of non-focus columns
ix_nf = range(len(results.params))
ix_nf = list(ix_nf)
ix_nf.pop(focus_col)
nnf = len(ix_nf)
# Estimate the conditional means if not provided.
if cond_means is None:
# Below we calculate E[x | focus] where x is each column other
# than the focus column. We don't want the intercept when we do
# this so we remove it here.
pexog = model.exog[:, ix_nf]
pexog -= pexog.mean(0)
u, s, vt = np.linalg.svd(pexog, 0)
ii = np.flatnonzero(s > 1e-6)
pexog = u[:, ii]
fcol = model.exog[:, focus_col]
cond_means = np.empty((len(fcol), pexog.shape[1]))
for j in range(pexog.shape[1]):
            # Lowess fit of SVD-reduced column j on the focus column gives
            # the estimated conditional mean E[x_j | focus].
y0 = pexog[:, j]
cf = lowess(y0, fcol, frac=frac, return_sorted=False)
cond_means[:, j] = cf
new_exog = np.concatenate((model.exog[:, ix_nf], cond_means), axis=1)
# Refit the model using the adjusted exog values
klass = model.__class__
init_kwargs = model._get_init_kwds()
new_model = klass(model.endog, new_exog, **init_kwargs)
new_result = new_model.fit()
# The partial residual, with respect to l(x2) (notation of Cook 1998)
presid = model.endog - new_result.fittedvalues
if isinstance(model, (GLM, GEE)):
presid *= model.family.link.deriv(new_result.fittedvalues)
if new_exog.shape[1] > nnf:
presid += np.dot(new_exog[:, nnf:], new_result.params[nnf:])
return presid
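# Hedged illustration (an assumption for exposition, not from the original
# source): `cond_means` may also be passed explicitly.  Per the docstring, an
# empty n x 0 array asks for the conditional means to be treated as zero:
#
#   nobs = results.model.exog.shape[0]
#   presid = ceres_resids(results, "x2", cond_means=np.empty((nobs, 0)))
#
# Here `results` is a previously fitted GLM/GEE/OLS results object and "x2"
# is a hypothetical exog column name.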
def partial_resids(results, focus_exog):
"""
Returns partial residuals for a fitted model with respect to a
'focus predictor'.
Parameters
----------
results : results instance
A fitted regression model.
    focus_exog : int or str
        The column index or name of model.exog with respect to which the
        partial residuals are calculated.
Returns
-------
An array of partial residuals.
References
----------
RD Cook and R Croos-Dabrera (1998). Partial residual plots in
generalized linear models. Journal of the American Statistical
Association, 93:442.
"""
# TODO: could be a method of results
# TODO: see Cook et al (1998) for a more general definition
# The calculation follows equation (8) from Cook's paper.
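    # Concretely, for a GLM with fitted mean mu_i = g^{-1}(eta_i) the code
    # below computes  r_i = b_j * x_ij + (y_i - mu_i) * g'(mu_i),
    # which reduces to  r_i = b_j * x_ij + (y_i - yhat_i)  for OLS/GLS/WLS.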
model = results.model
resid = model.endog - results.predict()
if isinstance(model, (GLM, GEE)):
resid *= model.family.link.deriv(results.fittedvalues)
elif isinstance(model, (OLS, GLS, WLS)):
pass # No need to do anything
else:
raise ValueError("Partial residuals for '%s' not implemented."
% type(model))
if type(focus_exog) is str:
focus_col = model.exog_names.index(focus_exog)
else:
focus_col = focus_exog
focus_val = results.params[focus_col] * model.exog[:, focus_col]
return focus_val + resid
def added_variable_resids(results, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None):
"""
Residualize the endog variable and a 'focus' exog variable in a
regression model with respect to the other exog variables.
Parameters
----------
results : regression results instance
A fitted model including the focus exog and all other
predictors of interest.
focus_exog : integer or string
The column of results.model.exog or a variable name that is
to be residualized against the other predictors.
resid_type : string
The type of residuals to use for the dependent variable. If
None, uses `resid_deviance` for GLM/GEE and `resid` otherwise.
use_glm_weights : bool
Only used if the model is a GLM or GEE. If True, the
residuals for the focus predictor are computed using WLS, with
the weights obtained from the IRLS calculations for fitting
the GLM. If False, unweighted regression is used.
fit_kwargs : dict, optional
Keyword arguments to be passed to fit when refitting the
model.
Returns
-------
endog_resid : array-like
        The residuals for the endog variable after regressing out the other
        predictors
focus_exog_resid : array-like
The residuals for the focus predictor
Notes
-----
The 'focus variable' residuals are always obtained using linear
regression.
Currently only GLM, GEE, and OLS models are supported.
"""
model = results.model
if not isinstance(model, (GEE, GLM, OLS)):
raise ValueError("model type %s not supported for added variable residuals" %
model.__class__.__name__)
exog = model.exog
endog = model.endog
focus_exog, focus_col = utils.maybe_name_or_idx(focus_exog, model)
focus_exog_vals = exog[:, focus_col]
# Default residuals
if resid_type is None:
if isinstance(model, (GEE, GLM)):
resid_type = "resid_deviance"
else:
resid_type = "resid"
ii = range(exog.shape[1])
ii = list(ii)
ii.pop(focus_col)
reduced_exog = exog[:, ii]
start_params = results.params[ii]
klass = model.__class__
kwargs = model._get_init_kwds()
new_model = klass(endog, reduced_exog, **kwargs)
args = {"start_params": start_params}
if fit_kwargs is not None:
args.update(fit_kwargs)
new_result = new_model.fit(**args)
if not new_result.converged:
raise ValueError("fit did not converge when calculating added variable residuals")
try:
endog_resid = getattr(new_result, resid_type)
except AttributeError:
raise ValueError("'%s' residual type not available" % resid_type)
import statsmodels.regression.linear_model as lm
if isinstance(model, (GLM, GEE)) and use_glm_weights:
weights = model.family.weights(results.fittedvalues)
if hasattr(model, "data_weights"):
weights = weights * model.data_weights
lm_results = lm.WLS(focus_exog_vals, reduced_exog, weights).fit()
else:
lm_results = lm.OLS(focus_exog_vals, reduced_exog).fit()
focus_exog_resid = lm_results.resid
return endog_resid, focus_exog_resid
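if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: an added variable
    # plot for one predictor of a synthetic Poisson GLM.  The data, seed and
    # focus column are illustrative assumptions.
    import numpy as np
    import statsmodels.api as sm
    rng = np.random.RandomState(42)
    exog = sm.add_constant(rng.normal(size=(200, 3)))
    lin_pred = np.dot(exog, [0.2, 0.5, -0.3, 0.1])
    endog = rng.poisson(np.exp(lin_pred))
    glm_results = sm.GLM(endog, exog, family=sm.families.Poisson()).fit()
    fig = plot_added_variable(glm_results, focus_exog=1)
    fig.savefig("added_variable_example.png")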
| bsd-3-clause |
piyurai/sp17-i524 | project/S17-IO-3017/code/projectearth/kmeansplot.py | 14 | 3018 | import requests
import time
import dblayer
import plotly
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import random
from sklearn.cluster import KMeans
import testfile
NUM_CLUSTER = 3
def generate_color():
    # build one random RGB hex colour from three random bytes
    color = '#{:02x}{:02x}{:02x}'.format(*[random.randint(0, 255) for _ in range(3)])
return color
# Create random colors in list
color_list = []
for i in range(NUM_CLUSTER):
color_list.append(generate_color())
def showMagnitudesInCluster(data):
kmeans = KMeans(n_clusters=NUM_CLUSTER)
kmeans.fit(data)
labels = kmeans.labels_
centroids = kmeans.cluster_centers_
plot_data = []
for i in range(NUM_CLUSTER):
ds = data[np.where(labels == i)]
clustername = "Cluster " + str(i+1)
        trace = go.Scatter(x=ds[:, 0], y=ds[:, 1], mode='markers', showlegend=False, name=clustername, marker=dict(size=5, color=color_list[i]))
plot_data.append(trace)
# plot the centroids
        trace = go.Scatter(x=[centroids[i, 0]], y=[centroids[i, 1]], mode='markers', marker=dict(size=10, color='black'))
plot_data.append(trace)
layout = go.Layout(title='Magnitude Vs. Depth - K-Means Clusters', titlefont=dict(family='Courier New, monospace',size=20,color='#7f7f7f'),
xaxis=dict(title='Depth of Earthquake', titlefont=dict(family='Courier New, monospace',size=18,color='#7f7f7f')),
yaxis=dict(title='Magnitude',titlefont=dict(family='Courier New, monospace',size=18,color='#7f7f7f'))
)
fig = go.Figure(data=plot_data, layout=layout)
div = plotly.offline.plot(fig, include_plotlyjs=True, output_type='div')
return div
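# Hedged usage sketch (assumption, not in the original project): the function
# expects an (n, 2) array whose first column is depth and second column is
# magnitude, matching the axis labels above, e.g.
#
#   demo = np.column_stack([np.random.uniform(0, 700, 500),    # depth
#                           np.random.uniform(2.0, 9.0, 500)])  # magnitude
#   html_div = showMagnitudesInCluster(demo)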
def mkMag():
#### TME: Get start time
start_time = time.time()
####
sess = requests.Session()
dbobj = dblayer.classDBLayer()
projection = [
{"$project": {"_id": 0, "mag": "$properties.mag", "depth": {"$arrayElemAt": ["$geometry.coordinates", 2]}}}]
dframe_mag = pd.DataFrame(list(dbobj.doaggregate(projection)))
#### TME: Elapsed time taken to read data from MongoDB
fileobj = testfile.classFileWrite()
elapsed = time.time() - start_time
fileobj.writeline()
str1 = str(elapsed) + " secs required to read " + str(dframe_mag['depth'].count()) + " records from database."
fileobj.writelog("Reading Magnitude and Depth data")
fileobj.writelog(str1)
####
#### TME: Get start time
start_time = time.time()
####
div = showMagnitudesInCluster(dframe_mag.values)
response = """<html><title></title><head><meta charset=\"utf8\"> </head> <body>""" + div + """</body> </html>"""
#### TME: Elapsed time taken to cluster and plot data
elapsed = time.time() - start_time
fileobj.writeline()
str1 = "Applying K-Means clustering and plotting its output \n" + "Time taken: " + str(elapsed)
fileobj.writelog(str1)
fileobj.writeline()
fileobj.closefile()
dbobj.closedb()
return response
| apache-2.0 |
IBCCW/mavlink | pymavlink/tools/mavgpslag.py | 43 | 3446 | #!/usr/bin/env python
'''
calculate GPS lag from DF log
'''
import sys, time, os
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--plot", action='store_true', default=False, help="plot errors")
parser.add_argument("--minspeed", type=float, default=6, help="minimum speed")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
from pymavlink.mavextra import *
from pymavlink.rotmat import Vector3, Matrix3
'''
Support having a $HOME/.pymavlink/mavextra.py for extra graphing functions
'''
home = os.getenv('HOME')
if home is not None:
extra = os.path.join(home, '.pymavlink', 'mavextra.py')
if os.path.exists(extra):
import imp
mavuser = imp.load_source('pymavlink.mavuser', extra)
from pymavlink.mavuser import *
def velocity_error(timestamps, vel, gaccel, accel_indexes, imu_dt, shift=0):
'''return summed velocity error'''
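    # For each consecutive pair of GPS velocity samples, integrate the
    # earth-frame accelerations between the (optionally shifted) IMU indices,
    # rescale by the ratio of the GPS dt to the IMU-index dt, and accumulate
    # the mean absolute mismatch of the horizontal velocity deltas.  gps_lag()
    # minimises this error over `shift` to estimate the GPS lag in IMU samples.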
sum = 0
count = 0
for i in range(0, len(vel)-1):
dv = vel[i+1] - vel[i]
da = Vector3()
for idx in range(1+accel_indexes[i]-shift, 1+accel_indexes[i+1]-shift):
da += gaccel[idx]
dt1 = timestamps[i+1] - timestamps[i]
dt2 = (accel_indexes[i+1] - accel_indexes[i]) * imu_dt
da *= imu_dt
da *= dt1/dt2
#print(accel_indexes[i+1] - accel_indexes[i])
ex = abs(dv.x - da.x)
ey = abs(dv.y - da.y)
sum += 0.5*(ex+ey)
count += 1
if count == 0:
return None
return sum/count
def gps_lag(logfile):
'''work out gps velocity lag times for a log file'''
print("Processing log %s" % filename)
mlog = mavutil.mavlink_connection(filename)
timestamps = []
vel = []
gaccel = []
accel_indexes = []
ATT = None
IMU = None
dtsum = 0
dtcount = 0
while True:
m = mlog.recv_match(type=['GPS','IMU','ATT'])
if m is None:
break
t = m.get_type()
if t == 'GPS' and m.Status==3 and m.Spd>args.minspeed:
v = Vector3(m.Spd*cos(radians(m.GCrs)), m.Spd*sin(radians(m.GCrs)), m.VZ)
vel.append(v)
timestamps.append(m._timestamp)
accel_indexes.append(max(len(gaccel)-1,0))
elif t == 'ATT':
ATT = m
elif t == 'IMU':
if ATT is not None:
gaccel.append(earth_accel_df(m, ATT))
if IMU is not None:
dt = m._timestamp - IMU._timestamp
dtsum += dt
dtcount += 1
IMU = m
imu_dt = dtsum / dtcount
print("Loaded %u samples imu_dt=%.3f" % (len(vel), imu_dt))
besti = -1
besterr = 0
delays = []
errors = []
for i in range(0,100):
err = velocity_error(timestamps, vel, gaccel, accel_indexes, imu_dt, shift=i)
if err is None:
break
errors.append(err)
delays.append(i*imu_dt)
if besti == -1 or err < besterr:
besti = i
besterr = err
print("Best %u (%.3fs) %f" % (besti, besti*imu_dt, besterr))
if args.plot:
import matplotlib.pyplot as plt
plt.plot(delays, errors, 'bo-')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,y2))
plt.ylabel('Error')
plt.xlabel('Delay(s)')
plt.show()
for filename in args.logs:
gps_lag(filename)
| lgpl-3.0 |
Fuchai/Philosophy-Machine | undirected/decision_tree.py | 1 | 2242 | from __future__ import division
from __future__ import print_function
from mush import *
from sklearn import tree
from sklearn import preprocessing
x=data[:,np.arange(1,24)]
y=data[:,0]
# we turn all the data to one-hot encoding.
# pretty stupid, but given how much time I am willing to invest in this project, this is sound
# all nodes, probably 100 of them, will be a long vector of booleans
# each instance will be marked on them
# the tree predicts dim 0
# print(mushroom.nodes())
lb=preprocessing.MultiLabelBinarizer()
x_nodes=set(_ for _ in mushroom.nodes() if "_0" not in _)
# print("x_nodes",x_nodes)
lb.fit_transform([x_nodes])
# print("classes",lb.classes_)
# print('x[0]',x[0])
# print(lb.transform([x[0]]))
x=lb.transform(x)
y=[0 if _=="e_0" else 1 for _ in y]
# print(y)
clf = tree.DecisionTreeClassifier()
clf.fit(x,y)
print(len(test))
testx=test[:,np.arange(1,24)]
testy=test[:,0]
testx=lb.transform(testx)
testy=[0 if _=="e_0" else 1 for _ in testy]
print(clf.predict(testx))
result=clf.predict(testx)==testy
print(sum(result),sum(~result))
# in the end tree methods achieved 100% correct rate
# the gibbs sampling with undirected graph failed to capture the complicated interaction among factors
# I wish there is a way to utilize decision tree algorithm to extract useful rules
# That solves the composite discovery problem doesn't it?
# I could use NN to make inferences. But that's not valuable.
# I should test out the rules that I distilled on the last run
# Modify the criterion a bit and put it to the test.
# It's just functions you know.
# Everything is just functions. Everything is neural networks, piping one after another.
# Created to imitate the structure known a priori. Convolution. Recurrence. I just need some creative mind to hack it.
# But I think those are trivial.
# Somehow I am very deviated from the starting point. I should go read some to pick it up again.
# NN is bad. Because it's not interpretable. It's not integratable.
# To solve it, make NN modular with graph, and this was my initial inspiration.
# How do I do it? I really feel like I have everything I need now. I have correlation graph, I have directed KF, I have NN,
# I have inference methods.
# How do I put them together? | apache-2.0 |
fusion809/python-scripts | IVP/SciPy/lorenz_animation.py | 1 | 2397 | #!/usr/bin/env python2
# By Jake Vanderplas
import numpy as np
from scipy import integrate
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import cnames
from matplotlib import animation
N_trajectories = 20
def lorentz_deriv((x, y, z), t0, sigma=10., beta=8./3, rho=28.0):
"""Compute the time-derivative of a Lorentz system."""
return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
# Choose random starting points, uniformly distributed from -15 to 15
np.random.seed(1)
x0 = -15 + 30 * np.random.random((N_trajectories, 3))
# Solve for the trajectories
t = np.linspace(0, 40, 10000)
x_t = np.asarray([integrate.odeint(lorentz_deriv, x0i, t)
for x0i in x0])
# Set up figure & 3D axis for animation
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], projection='3d')
ax.axis('off')
# choose a different color for each trajectory
colors = plt.cm.jet(np.linspace(0, 1, N_trajectories))
# set up lines and points
lines = sum([ax.plot([], [], [], '-', c=c)
for c in colors], [])
pts = sum([ax.plot([], [], [], 'o', c=c)
for c in colors], [])
# prepare the axes limits
ax.set_xlim((-25, 25))
ax.set_ylim((-35, 35))
ax.set_zlim((5, 55))
# set point-of-view: specified by (altitude degrees, azimuth degrees)
ax.view_init(30, 0)
# initialization function: plot the background of each frame
def init():
for line, pt in zip(lines, pts):
line.set_data([], [])
line.set_3d_properties([])
pt.set_data([], [])
pt.set_3d_properties([])
return lines + pts
# animation function. This will be called sequentially with the frame number
def animate(i):
# we'll step two time-steps per frame. This leads to nice results.
i = (2 * i) % x_t.shape[1]
for line, pt, xi in zip(lines, pts, x_t):
x, y, z = xi[:i].T
line.set_data(x, y)
line.set_3d_properties(z)
pt.set_data(x[-1:], y[-1:])
pt.set_3d_properties(z[-1:])
ax.view_init(30, 0.3 * i)
fig.canvas.draw()
return lines + pts
# instantiate the animator.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=500, interval=30, blit=True)
# Save as mp4. This requires mplayer or ffmpeg to be installed
#anim.save('lorentz_attractor.mp4', fps=15, extra_args=['-vcodec', 'libx264'])
plt.show()
| gpl-3.0 |
jmhsi/justin_tinker | data_science/courses/temp/utils.py | 2 | 4479 | import math, os, json, sys, re, numpy as np, pickle, PIL, scipy
from PIL import Image
from glob import glob
from matplotlib import pyplot as plt
from operator import itemgetter, attrgetter, methodcaller
from collections import OrderedDict
import itertools
from itertools import chain
import pandas as pd
from numpy.random import random, permutation, randn, normal, uniform, choice
from numpy import newaxis
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
from scipy.ndimage import imread
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import OneHotEncoder
from sklearn.manifold import TSNE
import bcolz
from IPython.lib.display import FileLink
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils import np_utils
from keras.utils.np_utils import to_categorical
from keras.models import Sequential, Model
from keras.layers import Input, Embedding, Reshape, merge, LSTM, Bidirectional
from keras.layers import TimeDistributed, Activation, SimpleRNN, GRU
from keras.layers import Flatten, Dense, Dropout, Lambda
from keras.regularizers import l2, l1
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD, RMSprop, Adam
from keras.layers import deserialize as layer_from_config
from keras.metrics import categorical_crossentropy, categorical_accuracy
from keras.layers.convolutional import *
from keras.preprocessing import image, sequence
from keras.preprocessing.text import Tokenizer
from vgg16 import Vgg16
np.set_printoptions(precision=4, linewidth=100)
to_bw = np.array([0.299, 0.587, 0.114])
def gray(img): return np.rollaxis(img, 0, 1).dot(to_bw)
def to_plot(img): return np.rollaxis(img, 0, 1).astype(np.uint8)
def plot(img): plt.imshow(to_plot(img))
def floor(x): return int(math.floor(x))
def ceil(x): return int(math.ceil(x))
def plots(ims, figsize=(12,6), rows=1, interp=False, titles=None):
if type(ims[0]) is np.ndarray:
ims = np.array(ims).astype(np.uint8)
if (ims.shape[-1] != 3): ims = ims.transpose((0,2,3,1))
f = plt.figure(figsize=figsize)
for i in range(len(ims)):
sp = f.add_subplot(rows, len(ims)//rows, i+1)
sp.axis('Off')
if titles is not None: sp.set_title(titles[i], fontsize=16)
plt.imshow(ims[i], interpolation=None if interp else 'none')
def do_clip(arr, mx):
clipped = np.clip(arr, (1-mx)/1, mx)
return clipped/clipped.sum(axis=1)[:, np.newaxis]
def wrap_config(layer):
return {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
def copy_layer(layer): return layer_from_config(wrap_config(layer))
def copy_layers(layers): return [copy_layer(layer) for layer in layers]
def copy_weights(from_layers, to_layers):
for from_layer,to_layer in zip(from_layers, to_layers):
to_layer.set_weights(from_layer.get_weights())
def save_array(fname, arr):
c=bcolz.carray(arr, rootdir=fname, mode='w')
c.flush()
def load_array(fname): return bcolz.open(fname)[:]
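# Hedged usage note (assumption): save_array/load_array round-trip a numpy
# array through a bcolz directory on disk, e.g.
#
#   arr = np.arange(12).reshape(3, 4)
#   save_array('tmp_arr.bc', arr)
#   assert (load_array('tmp_arr.bc') == arr).all()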
def get_classes(path):
batches = get_batches(path+'train', shuffle=False, batch_size=1)
val_batches = get_batches(path+'valid', shuffle=False, batch_size=1)
test_batches = get_batches(path+'test', shuffle=False, batch_size=1)
return (val_batches.classes, batches.classes, onehot(val_batches.classes), onehot(batches.classes),
val_batches.filenames, batches.filenames, test_batches.filenames)
def limit_mem():
K.get_session().close()
cfg = K.tf.ConfigProto()
cfg.gpu_options.allow_growth = True
K.set_session(K.tf.Session(config=cfg))
class MixIterator(object):
def __init__(self, iters):
self.iters = iters
self.multi = type(iters) is list
if self.multi:
self.N = sum([it[0].N for it in self.iters])
else:
self.N = sum([it.N for it in self.iters])
def reset(self):
for it in self.iters: it.reset()
def __iter__(self):
return self
def next(self, *args, **kwargs):
if self.multi:
nexts = [[next(it) for it in o] for o in self.iters]
n0 = np.concatenate([n[0] for n in nexts])
n1 = np.concatenate([n[1] for n in nexts])
return (n0, n1)
else:
nexts = [next(it) for it in self.iters]
n0 = np.concatenate([n[0] for n in nexts])
n1 = np.concatenate([n[1] for n in nexts])
return (n0, n1)
| apache-2.0 |
harshaneelhg/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, the
    loader chooses between integer id lookup and metadata name lookup by
    looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
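# Hedged usage sketch (assumption): with MLCOMP_DATASETS_HOME pointing at an
# unzipped MLComp archive, the 20 Newsgroups training split could be loaded as
#
#   news_train = load_mlcomp('20news-18828', 'train')
#   print(news_train.target_names)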
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/neighbors/approximate.py | 27 | 22368 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
        Lowest hash length to be searched when candidate selection is
        performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the default
        radius used by the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
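# Hedged usage note (assumption, mirroring the class docstring): new points
# can be folded into an already fitted forest in batches, e.g.
#
#   lshf = LSHForest(random_state=42).fit(X_train)
#   lshf.partial_fit(X_new_batch)
#   distances, indices = lshf.kneighbors(X_query, n_neighbors=3)
#
# X_train, X_new_batch and X_query are placeholder arrays with matching
# numbers of features.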
| mit |
junbochen/pylearn2 | pylearn2/cross_validation/tests/test_dataset_iterators.py | 49 | 6535 | """
Test cross-validation dataset iterators.
"""
from pylearn2.config import yaml_parse
from pylearn2.testing.skip import skip_if_no_sklearn
def test_dataset_k_fold():
"""Test DatasetKFold."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'DatasetKFold'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_stratified_dataset_k_fold():
"""Test StratifiedDatasetKFold."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'StratifiedDatasetKFold'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_dataset_shuffle_split():
"""Test DatasetShuffleSplit."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'DatasetShuffleSplit'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_stratified_dataset_shuffle_split():
"""Test StratifiedDatasetShuffleSplit."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'StratifiedDatasetShuffleSplit'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_dataset_validation_k_fold():
"""Test DatasetValidKFold."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'DatasetValidationKFold'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_stratified_dataset_validation_k_fold():
"""Test StratifiedDatasetValidKFold."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'StratifiedDatasetValidationKFold'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_dataset_validation_shuffle_split():
"""Test DatasetValidShuffleSplit."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'DatasetValidationShuffleSplit'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_stratified_dataset_validation_shuffle_split():
"""Test StratifiedDatasetValidShuffleSplit."""
skip_if_no_sklearn()
mapping = {'dataset_iterator': 'StratifiedDatasetValidationShuffleSplit'}
test_yaml = test_yaml_dataset_iterator % mapping
trainer = yaml_parse.load(test_yaml)
trainer.main_loop()
def test_which_set():
"""Test which_set selector."""
skip_if_no_sklearn()
# one label
this_yaml = test_yaml_which_set % {'which_set': 'train'}
trainer = yaml_parse.load(this_yaml)
trainer.main_loop()
# multiple labels
this_yaml = test_yaml_which_set % {'which_set': ['train', 'test']}
trainer = yaml_parse.load(this_yaml)
trainer.main_loop()
# improper label (iterator only returns 'train' and 'test' subsets)
this_yaml = test_yaml_which_set % {'which_set': 'valid'}
try:
trainer = yaml_parse.load(this_yaml)
trainer.main_loop()
raise AssertionError
except ValueError:
pass
# bogus label (not in approved list)
this_yaml = test_yaml_which_set % {'which_set': 'bogus'}
try:
yaml_parse.load(this_yaml)
raise AssertionError
except ValueError:
pass
def test_no_targets():
"""Test cross-validation without targets."""
skip_if_no_sklearn()
trainer = yaml_parse.load(test_yaml_no_targets)
trainer.main_loop()
test_yaml_dataset_iterator = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.%(dataset_iterator)s {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 10,
nhid: 8,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
}
"""
test_yaml_which_set = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 2,
},
which_set: %(which_set)s,
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 10,
nhid: 8,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
}
"""
test_yaml_no_targets = """
!obj:pylearn2.cross_validation.TrainCV {
dataset_iterator:
!obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold {
dataset:
!obj:pylearn2.testing.datasets.random_dense_design_matrix
{
rng: !obj:numpy.random.RandomState { seed: 1 },
num_examples: 100,
dim: 10,
num_classes: 0,
},
},
model: !obj:pylearn2.models.autoencoder.Autoencoder {
nvis: 10,
nhid: 8,
act_enc: 'sigmoid',
act_dec: 'linear'
},
algorithm: !obj:pylearn2.training_algorithms.bgd.BGD {
batch_size: 50,
line_search_mode: 'exhaustive',
conjugate: 1,
termination_criterion:
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 1,
},
cost: !obj:pylearn2.costs.autoencoder.MeanSquaredReconstructionError {
},
},
}
"""
| bsd-3-clause |
pandastrail/InfoEng | scripting/exercises/find_closest_point.py | 1 | 2292 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 25 15:44:42 2017
@author: hase
Find closest point
- There is a board with (x,y) points
- The user input a (x,y) point
- The program returns the closest point to the one given and the distance
between them
"""
# Modules
import matplotlib.pyplot as plt
import numpy as np
# Functions
def galaxy(dots):
'''Generate a n x 2 random array using numpy.random
This will be the board of points to compare the user input'''
coord_array = np.random.random_sample((dots, 2)) * 10
# random sample multiplied by 10 to get pairs between 0 and 10
coord_list = list(coord_array)
return coord_list
def closest(coordinates):
''' Calculate closest point to a given coordinates and the
distance between this closest point and the given coordinates.
It outputs the first closest point found.
What if there are more than one closest point'''
if type(coordinates) != type([]):
raise Exception('Coordinates must have a type list')
# The exception defined above is no longer valid after np.random
point = None
distance = None
x = []
y = []
for k in coordinates:
x.append(k[0]) # Append to list of x's to plot
y.append(k[1]) # Append to list of y's to plot
if k is None: continue
# And calculate euclidean distance:
d = (((k[0] - goal[0])**2 + (k[1] - goal[1])**2) ** 0.5) # alt for math.sqrt
if point is None or d < distance: # Reassign if needed
point = k
distance = d
return point, distance, x, y
def user():
'''Get user coordinates and transform to tuple.
Needs to be optimized to accept float
and accept values larger than 9'''
goal_list = []
goal_raw = input('your x,y coordinates? ')
goal_list.append(int(goal_raw[0]))
goal_list.append(int(goal_raw[2]))
goal = tuple(goal_list)
return goal
# Execute
goal = user()
dots = int(input('How many points to draw? ')) # Needs try and except
coord_list = galaxy(dots)
point, distance, x, y = closest(coord_list)
plt.scatter(x,y)
plt.scatter(goal[0], goal[1], s=100)
plt.show()
print('Point given', goal, 'is closest to:')
print('point', point, 'with distance', "%.2f" % distance) | gpl-3.0 |
soulmachine/scikit-learn | sklearn/datasets/samples_generator.py | 3 | 53169 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import warnings
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
    n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
        The fraction of samples whose class is randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
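Examples
--------
A minimal illustration; only the shapes are shown because the values depend
on the random state:
>>> from sklearn.datasets.samples_generator import make_classification
>>> X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
...                            n_redundant=1, random_state=0)
>>> X.shape
(20, 5)
>>> y.shape
(20,)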
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. Number of labels follows
a Poisson distribution that never takes the value 0.
length : int, optional (default=50)
Sum of the features (number of words if documents).
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
return_indicator : bool, optional (default=False)
If ``True``, return ``Y`` in the binary indicator format, else
return a tuple of lists of labels.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array or sparse CSR matrix of shape [n_samples, n_features]
The generated samples.
Y : tuple of lists or array of shape [n_samples, n_classes]
The label sets.
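Examples
--------
A minimal illustration using the indicator output format; only the shapes are
shown because the values depend on the random state:
>>> from sklearn.datasets.samples_generator import make_multilabel_classification
>>> X, Y = make_multilabel_classification(n_samples=5, n_features=10,
...                                       n_classes=3, return_indicator=True,
...                                       random_state=0)
>>> X.shape
(5, 10)
>>> Y.shape
(5, 3)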
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
if return_indicator:
lb = MultiLabelBinarizer()
Y = lb.fit([range(n_classes)]).transform(Y)
else:
warnings.warn('Support for the sequence of sequences multilabel '
'representation is being deprecated and replaced with '
'a sparse indicator matrix. '
'return_indicator will default to True from version '
'0.17.',
DeprecationWarning)
return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
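Examples
--------
A minimal illustration; only the shapes are shown because the values depend
on the random state:
>>> from sklearn.datasets.samples_generator import make_hastie_10_2
>>> X, y = make_hastie_10_2(n_samples=100, random_state=0)
>>> X.shape
(100, 10)
>>> y.shape
(100,)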
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See `make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
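Examples
--------
A minimal illustration that also returns the ground-truth coefficients; only
the shapes are shown because the values depend on the random state:
>>> from sklearn.datasets.samples_generator import make_regression
>>> X, y, coef = make_regression(n_samples=10, n_features=4, n_informative=2,
...                              coef=True, random_state=0)
>>> X.shape
(10, 4)
>>> y.shape
(10,)
>>> coef.shape
(4,)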
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
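Examples
--------
A minimal illustration; only the shapes are shown because the values depend
on the random state:
>>> from sklearn.datasets.samples_generator import make_circles
>>> X, y = make_circles(n_samples=10, noise=0.05, random_state=0)
>>> X.shape
(10, 2)
>>> y.shape
(10,)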
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
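Examples
--------
A minimal illustration; only the shapes are shown because the values depend
on the random state:
>>> from sklearn.datasets.samples_generator import make_moons
>>> X, y = make_moons(n_samples=10, noise=0.1, random_state=0)
>>> X.shape
(10, 2)
>>> y.shape
(10,)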
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_in, dtype=np.intp),
np.ones(n_samples_out, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, n in enumerate(n_samples_per_center):
X.append(centers[i] + generator.normal(scale=cluster_std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
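Examples
--------
A minimal illustration; only the shapes are shown because the values depend
on the random state:
>>> from sklearn.datasets.samples_generator import make_friedman1
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> X.shape
(50, 10)
>>> y.shape
(50,)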
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance in:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
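Examples
--------
A minimal illustration; only the shape is shown because the values depend
on the random state:
>>> from sklearn.datasets.samples_generator import make_low_rank_matrix
>>> X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
...                          random_state=0)
>>> X.shape
(50, 25)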
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
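Examples
--------
A minimal illustration; only the shapes are shown because the values depend
on the random state:
>>> from sklearn.datasets.samples_generator import make_sparse_coded_signal
>>> data, dictionary, code = make_sparse_coded_signal(
...     n_samples=5, n_components=8, n_features=10, n_nonzero_coefs=3,
...     random_state=0)
>>> data.shape
(10, 5)
>>> dictionary.shape
(10, 8)
>>> code.shape
(8, 5)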
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
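Examples
--------
A minimal illustration; only the shape is shown because the values depend
on the random state:
>>> from sklearn.datasets.samples_generator import make_spd_matrix
>>> X = make_spd_matrix(n_dim=3, random_state=0)
>>> X.shape
(3, 3)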
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is non zero (see notes).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
prec: array of shape = [dim, dim]
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
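Examples
--------
A minimal illustration; only the shape is shown because the values depend
on the random state:
>>> from sklearn.datasets.samples_generator import make_sparse_spd_matrix
>>> prec = make_sparse_spd_matrix(dim=4, alpha=0.9, random_state=0)
>>> prec.shape
(4, 4)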
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the rows: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
d = np.diag(prec)
d = 1. / np.sqrt(d)
prec *= d
prec *= d[:, np.newaxis]
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
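Examples
--------
A minimal illustration; only the shapes are shown because the values depend
on the random state:
>>> from sklearn.datasets.samples_generator import make_swiss_roll
>>> X, t = make_swiss_roll(n_samples=30, noise=0.05, random_state=0)
>>> X.shape
(30, 3)
>>> t.shape
(30,)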
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
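Examples
--------
A minimal illustration; only the shapes are shown because the values depend
on the random state:
>>> from sklearn.datasets.samples_generator import make_gaussian_quantiles
>>> X, y = make_gaussian_quantiles(n_samples=30, n_features=2, n_classes=3,
...                                random_state=0)
>>> X.shape
(30, 2)
>>> y.shape
(30,)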
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
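Examples
--------
A minimal illustration; only the shapes are shown because the values depend
on the random state:
>>> from sklearn.datasets.samples_generator import make_biclusters
>>> X, rows, cols = make_biclusters(shape=(10, 8), n_clusters=2,
...                                 random_state=0)
>>> X.shape
(10, 8)
>>> rows.shape
(2, 10)
>>> cols.shape
(2, 8)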
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == c for c in range(n_clusters)])
cols = np.vstack([col_labels == c for c in range(n_clusters)])
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
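Examples
--------
A minimal illustration with 2 row clusters and 3 column clusters; only the
shapes are shown because the values depend on the random state:
>>> from sklearn.datasets.samples_generator import make_checkerboard
>>> X, rows, cols = make_checkerboard(shape=(10, 8), n_clusters=(2, 3),
...                                   random_state=0)
>>> X.shape
(10, 8)
>>> rows.shape
(6, 10)
>>> cols.shape
(6, 8)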
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack([row_labels == label
                  for label in range(n_row_clusters)
                  for _ in range(n_col_clusters)])
cols = np.vstack([col_labels == label
                  for _ in range(n_row_clusters)
                  for label in range(n_col_clusters)])
return result, rows, cols
| bsd-3-clause |
kedaio/tushare | tushare/trader/trader.py | 1 | 10985 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Created on 2016-09-25
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
'''
import six
import pandas as pd
import requests
import time
from threading import Thread
from tushare.trader import vars as vs
from tushare.trader import utils
from tushare.util import upass as up
class TraderAPI(object):
"""
Live stock trading interface.
Reminder: the ideas and content covered here are intended only for research into and
experimentation with quantitative investment and programmatic trading; they are not a basis
for routine programmatic trading by individuals or institutions, and no responsibility is
taken for trading or policy risks arising from live use. Contact me if you have any questions.
Investing is risky; place orders with caution.
"""
def __init__(self, broker = ''):
if broker == '':
return None
self.broker = broker
self.trade_prefix = vs.CSC_PREFIX % (vs.P_TYPE['https'],
vs.DOMAINS['csc'],
vs.PAGES['csclogin'])
self.heart_active = True
self.s = requests.session()
if six.PY2:
self.heart_thread = Thread(target = self.send_heartbeat)
self.heart_thread.setDaemon(True)
else:
self.heart_thread = Thread(target = self.send_heartbeat,
daemon=True)
def login(self):
self.s.headers.update(vs.AGENT)
self.s.get(vs.CSC_PREFIX % (vs.P_TYPE['https'], vs.DOMAINS['csc'],
vs.PAGES['csclogin']))
res = self.s.get(vs.V_CODE_URL%(vs.P_TYPE['https'],
vs.DOMAINS['csc'],
vs.PAGES['vimg']))
if self._login(utils.get_vcode('csc', res)) is False:
print('Please check that the account and password are correct, or whether the broker server is under maintenance.')
self.keepalive()
def _login(self, v_code):
brokerinfo = up.get_broker(self.broker)
user = brokerinfo['user'][0]
login_params = dict(
inputid = user,
j_username = user,
j_inputid = user,
AppendCode = v_code,
isCheckAppendCode = 'false',
logined = 'false',
f_tdx = '',
j_cpu = '',
j_password = brokerinfo['passwd'][0]
)
logined = self.s.post(vs.CSC_LOGIN_ACTION % (vs.P_TYPE['https'],
vs.DOMAINS['csc']),
params = login_params)
if logined.text.find(u'消息中心') != -1:
return True
return False
def keepalive(self):
if self.heart_thread.is_alive():
self.heart_active = True
else:
self.heart_thread.start()
def send_heartbeat(self):
while True:
if self.heart_active:
try:
response = self.heartbeat()
self.check_login_status(response)
except:
self.login()
time.sleep(100)
else:
time.sleep(10)
def heartbeat(self):
return self.baseinfo
def exit(self):
self.heart_active = False
def buy(self, stkcode, price=0, count=0, amount=0):
"""
Buy securities.
params
---------
stkcode: stock code, string
price: order price, int
count: number of shares to buy
amount: cash amount to spend on the buy
"""
jsonobj = utils.get_jdata(self._trading(stkcode, price,
count, amount, 'B', 'buy'))
res = True if jsonobj['result'] == 'true' else False
return res
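# Example usage (illustrative sketch, not part of the original API docs; it
# assumes broker credentials for 'csc' were stored beforehand so that
# up.get_broker('csc') succeeds, that the broker server is reachable, and
# the stock code and price below are made-up values):
#
#     from tushare.trader.trader import TraderAPI
#     api = TraderAPI('csc')
#     api.login()                                # log in with stored credentials
#     api.buy('601989', price=4.55, count=100)   # returns True on success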
def sell(self, stkcode, price=0, count=0, amount=0):
"""
Sell securities.
params
---------
stkcode: stock code, string
price: order price, int
count: number of shares to sell
amount: cash amount of the sell
"""
jsonobj = utils.get_jdata(self._trading(stkcode, price, count,
amount, 'S', 'sell'))
res = True if jsonobj['result'] == 'true' else False
return res
def _trading(self, stkcode, price, count, amount, tradeflag, tradetype):
txtdata = self.s.get(vs.TRADE_CHECK_URL % (vs.P_TYPE['https'],
vs.DOMAINS['csc'],
vs.PAGES['tradecheck'],
tradeflag, stkcode,
tradetype, utils.nowtime_str()))
jsonobj = utils.get_jdata(txtdata)
list = jsonobj['returnList'][0]
secuid = list['buysSecuid']
fundavl = list['fundavl']
stkname = list['stkname']
if secuid is not None:
if tradeflag == 'B':
buytype = vs.BUY
count = count if count else amount // price // 100 * 100
else:
buytype = vs.SELL
count = count if count else amount // price
tradeparams = dict(
stkname = stkname,
stkcode = stkcode,
secuid = secuid,
buytype = buytype,
bsflag = tradeflag,
maxstkqty = '',
buycount = count,
buyprice = price,
_ = utils.nowtime_str()
)
tradeResult = self.s.post(vs.TRADE_URL % (vs.P_TYPE['https'],
vs.DOMAINS['csc'],
vs.PAGES['trade']),
params = tradeparams)
return tradeResult
return None
def position(self):
"""
Get the list of current positions.
return:DataFrame
----------------------
stkcode: security code
stkname: security name
stkqty : quantity held
stkavl : quantity available for sale
lastprice: latest price
costprice: cost price
income : reference profit/loss (CNY)
"""
return self._get_position()
def _get_position(self):
self.s.headers.update(vs.AGENT)
txtdata = self.s.get(vs.BASE_URL % (vs.P_TYPE['https'],
vs.DOMAINS['csc'],
vs.PAGES['position']))
jsonobj = utils.get_jdata(txtdata)
df = pd.DataFrame(jsonobj['data'], columns=vs.POSITION_COLS)
return df
def entrust_list(self):
"""
Get the list of submitted orders.
return:DataFrame
----------
ordersno: order number
stkcode: security code
stkname: security name
bsflagState: buy/sell flag
orderqty: order quantity
matchqty: filled quantity
orderprice: order price
operdate: trade date
opertime: trade time
orderdate: date the order was placed
state: order status
"""
txtdata = self.s.get(vs.ENTRUST_LIST_URL % (vs.P_TYPE['https'],
vs.DOMAINS['csc'],
vs.PAGES['entrustlist'],
utils.nowtime_str()))
jsonobj = utils.get_jdata(txtdata)
df = pd.DataFrame(jsonobj['data'], columns=vs.ENTRUST_LIST_COLS)
return df
def deal_list(self, begin=None, end=None):
"""
Get the list of executed trades.
params
-----------
begin: start date, YYYYMMDD
end: end date, YYYYMMDD
return: DataFrame
-----------
ordersno: order number
matchcode: trade (fill) number
trddate: trade date
matchtime: trade time
stkcode: security code
stkname: security name
bsflagState: buy/sell flag
orderprice: order price
matchprice: fill price
orderqty: order quantity
matchqty: filled quantity
matchamt: filled amount
"""
daterange = ''
if (begin is None) & (end is None):
selecttype = 'intraDay'
else:
daterange = vs.DEAL_DATE_RANGE % (begin, end)
selecttype = 'all'
txtdata = self.s.get(vs.DEAL_LIST_URL % (vs.P_TYPE['https'],
vs.DOMAINS['csc'],
vs.PAGES['deallist'],
selecttype, daterange,
utils.nowtime_str()))
jsonobj = utils.get_jdata(txtdata)
df = pd.DataFrame(jsonobj['data'], columns=vs.DEAL_LIST_COLS)
return df
def cancel(self, ordersno='', orderdate=''):
"""
Cancel orders.
params
-----------
ordersno: order numbers, comma-separated for multiple orders, e.g. 1866,1867
orderdate: order dates in YYYYMMDD, comma-separated, matching the order numbers
return
------------
string
"""
if (ordersno != '') & (orderdate != ''):
params = dict(
ordersno = ordersno,
orderdate = orderdate,
_ = utils.nowtime_str()
)
result = self.s.post(vs.CANCEL_URL % (vs.P_TYPE['https'], vs.DOMAINS['csc'], vs.PAGES['cancel']),
params = params)
jsonobj = utils.get_jdata(result.text)
return jsonobj['msgMap']['ResultSucess']
return None
def baseinfo(self):
"""
Get basic account information.
return: Series
-------------
fundid: account ID
gpsz: market value of stock holdings
fundvalue: market value of fund holdings
jihelicai: collective wealth-management holdings
fundbal: account balance
marketvalue: total assets
fundavl: available balance
daixiao: agency-sale holdings
otc: OTC holdings
"""
return self._get_baseinfo()
def _get_baseinfo(self):
self.s.headers.update(vs.AGENT)
txtdata = self.s.get(vs.BASE_URL % (vs.P_TYPE['https'], vs.DOMAINS['csc'], vs.PAGES['baseInfo']))
jsonobj = utils.get_jdata(txtdata)
stkdata = jsonobj['data']['moneytype0']
stkdata['fundid'] = jsonobj['fundid']
return pd.Series(stkdata)
def check_login_status(self, return_data):
if hasattr(return_data, 'get') and return_data.get('error_no') == '-1':
raise NotLoginError
class NotLoginError(Exception):
def __init__(self, result=None):
super(NotLoginError, self).__init__()
self.result = result
def heartbeat(self):
return self.baseinfo
| bsd-3-clause |
neurohackweek/avalanche | examples/sg_tutorial/plot_sg_tutorial.py | 7 | 3828 | """
======================================
A quick tour of sphinx-gallery and rST
======================================
One of the most important components of any package is its documentation.
For packages that involve data analysis, visualization of results / data is
a key element of the docs. Sphinx-gallery is an excellent tool for building
narrative-style documentation in the form of `.py` files, as well as for
generating a gallery of sample images that are generated by your various
scripts.
This is a short demo for how sphinx-gallery can be used to generate beautiful,
HTML-rendered documentation using regular python files.
"""
import numpy as np
import matplotlib.pyplot as plt
###############################################################################
# reStructuredText
# ----------------
#
# The primary benefit of sphinx-gallery is that it allows you to interweave
# `reStructuredText <http://docutils.sourceforge.net/rst.html>`_ along with
# your regular python code. This means that you can include formatted text
# with the script, all using regular text files. rST has a particular structure
# it expects in order to render properly (it is what sphinx uses as well).
#
# File headers and naming
# -----------------------
# Sphinx-gallery files must be initialized with a header like the one above.
# It must exist as a part of the triple-quoted docstring at the start of the
# file, and tells SG the title of the page. If you wish, you can include text
# that comes after the header, which will be rendered as a contextual bit of
# information.
#
# In addition, if you want to render a file with sphinx-gallery, it must match
# the file naming structure that the gallery is configured to look for. By
# default, this is `plot_*.py`.
#
# Interweaving code with text
# ---------------------------
#
# Sphinx-gallery allows you to interweave code with your text. For example, if
# we put a few lines of text below...
N = 1000
# They will be rendered as regular code. Note that now I am typing in a
# comment, because we've broken the chain of commented lines above.
x = np.random.randn(N)
# If we want to create another formatted block of text, we need to add a line
# of `#` spanning the whole line below. Like this:
###############################################################################
# Now we can once again have nicely formatted $t_{e}\chi^t$!
# Let's create our y-variable so we can make some plots
y = .2 * x + .4 * np.random.randn(N)
###############################################################################
# Plotting images
# ---------------
#
# Sphinx-gallery captures the images generated by matplotlib. This means that
# we can plot things as normal, and these images will be grouped with the
# text block that they fall underneath. For example, we could plot these two
# variables and the image will be shown below:
fig, ax = plt.subplots()
ax.plot(x, y, 'o')
###############################################################################
# Multiple images
# ---------------
#
# If we want multiple images, this is easy too. Sphinx-gallery will group
# everything together that's within the latest text block.
fig, axs = plt.subplots(1, 2)
axs[0].hist(x, bins=20)
axs[1].hist(y, bins=20)
fig, ax = plt.subplots()
ax.hist2d(x, y, bins=20)
###############################################################################
# Other kinds of formatting
# -------------------------
#
# Remember, rST can do all kinds of other cool stuff. We can even do things
# like add references to other packages and insert images. Check out this
# `guide <http://docutils.sourceforge.net/docs/user/rst/quickref.html>`_ for
# some sample rST code.
#
# .. image:: http://www.sphinx-doc.org/en/stable/_static/sphinxheader.png
# :width: 80%
#
# In the meantime, enjoy sphinx-gallery!
| apache-2.0 |
JT5D/scikit-learn | examples/grid_search_digits.py | 8 | 2665 | """
=====================================================================
Parameter estimation using grid search with a nested cross-validation
=====================================================================
This example shows how a classifier is optimized by "nested"
cross-validation, which is done using the
:class:`sklearn.grid_search.GridSearchCV` object on a development set
that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5, scoring=score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_estimator_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() / 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/ipython-2.2.0-py2.7.egg/IPython/extensions/sympyprinting.py | 12 | 5609 | """
A print function that pretty prints sympy Basic objects.
:moduleauthor: Brian Granger
Usage
=====
Once the extension is loaded, Sympy Basic objects are automatically
pretty-printed.
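A typical session looks like the following (illustrative sketch; the exact
rendering depends on the frontend and on whether LaTeX tools are available)::
    %load_ext sympyprinting
    from sympy import Symbol, sqrt
    sqrt(Symbol('x'))   # now rendered via the pretty/LaTeX printers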
As of SymPy 0.7.2, maintenance of this extension has moved to SymPy under
sympy.interactive.ipythonprinting; any modifications to account for changes to
SymPy should be submitted to SymPy rather than changed here. This module is
maintained here for backwards compatibility with old SymPy versions.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from IPython.lib.latextools import latex_to_png
from IPython.utils.py3compat import string_types
try:
from sympy import pretty, latex
except ImportError:
pass
import warnings
#-----------------------------------------------------------------------------
# Definitions of special display functions for use with IPython
#-----------------------------------------------------------------------------
def print_basic_unicode(o, p, cycle):
"""A function to pretty print sympy Basic objects."""
if cycle:
return p.text('Basic(...)')
out = pretty(o, use_unicode=True)
if '\n' in out:
p.text(u'\n')
p.text(out)
def print_png(o):
"""
A function to display sympy expression using inline style LaTeX in PNG.
"""
s = latex(o, mode='inline')
# mathtext does not understand certain latex flags, so we try to replace
# them with suitable subs.
s = s.replace('\\operatorname','')
s = s.replace('\\overline', '\\bar')
png = latex_to_png(s)
return png
def print_display_png(o):
"""
A function to display sympy expression using display style LaTeX in PNG.
"""
s = latex(o, mode='plain')
s = s.strip('$')
# As matplotlib does not support display style, dvipng backend is
# used here.
png = latex_to_png(s, backend='dvipng', wrap=True)
return png
def can_print_latex(o):
"""
Return True if type o can be printed with LaTeX.
If o is a container type, this is True if and only if every element of o
can be printed with LaTeX.
"""
import sympy
if isinstance(o, (list, tuple, set, frozenset)):
return all(can_print_latex(i) for i in o)
elif isinstance(o, dict):
return all((isinstance(i, string_types) or can_print_latex(i)) and can_print_latex(o[i]) for i in o)
elif isinstance(o,(sympy.Basic, sympy.matrices.Matrix, int, long, float)):
return True
return False
def print_latex(o):
"""A function to generate the latex representation of sympy
expressions."""
if can_print_latex(o):
s = latex(o, mode='plain')
s = s.replace('\\dag','\\dagger')
s = s.strip('$')
return '$$%s$$' % s
# Fallback to the string printer
return None
_loaded = False
def load_ipython_extension(ip):
"""Load the extension in IPython."""
import sympy
# sympyprinting extension has been moved to SymPy as of 0.7.2, if it
# exists there, warn the user and import it
try:
import sympy.interactive.ipythonprinting
except ImportError:
pass
else:
warnings.warn("The sympyprinting extension in IPython is deprecated, "
"use 'from sympy import init_printing; init_printing()'")
ip.extension_manager.load_extension('sympy.interactive.ipythonprinting')
return
global _loaded
if not _loaded:
plaintext_formatter = ip.display_formatter.formatters['text/plain']
for cls in (object, str):
plaintext_formatter.for_type(cls, print_basic_unicode)
printable_containers = [list, tuple]
# set and frozen set were broken with SymPy's latex() function, but
# was fixed in the 0.7.1-git development version. See
# http://code.google.com/p/sympy/issues/detail?id=3062.
if sympy.__version__ > '0.7.1':
printable_containers += [set, frozenset]
else:
plaintext_formatter.for_type(cls, print_basic_unicode)
plaintext_formatter.for_type_by_name(
'sympy.core.basic', 'Basic', print_basic_unicode
)
plaintext_formatter.for_type_by_name(
'sympy.matrices.matrices', 'Matrix', print_basic_unicode
)
png_formatter = ip.display_formatter.formatters['image/png']
png_formatter.for_type_by_name(
'sympy.core.basic', 'Basic', print_png
)
png_formatter.for_type_by_name(
'sympy.matrices.matrices', 'Matrix', print_display_png
)
for cls in [dict, int, long, float] + printable_containers:
png_formatter.for_type(cls, print_png)
latex_formatter = ip.display_formatter.formatters['text/latex']
latex_formatter.for_type_by_name(
'sympy.core.basic', 'Basic', print_latex
)
latex_formatter.for_type_by_name(
'sympy.matrices.matrices', 'Matrix', print_latex
)
for cls in printable_containers:
# Use LaTeX only if every element is printable by latex
latex_formatter.for_type(cls, print_latex)
_loaded = True
| apache-2.0 |