repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes) |
---|---|---|---|---|---|
alivecor/tensorflow | tensorflow/python/client/notebook.py | 109 | 4791 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Notebook front-end to TensorFlow.
When you run this binary, you'll see output like the following, which indicates
the serving URL of the notebook:
The IPython Notebook is running at: http://127.0.0.1:8888/
Press "Shift+Enter" to execute a cell
Press "Enter" on a cell to go into edit mode.
Press "Escape" to go back into command mode and use arrow keys to navigate.
Press "a" in command mode to insert cell above or "b" to insert cell below.
Your root notebooks directory is FLAGS.notebook_dir
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import socket
import sys
from tensorflow.python.platform import app
# pylint: disable=g-import-not-at-top
# Official recommended way of turning on fast protocol buffers as of 10/21/14
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp"
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION_VERSION"] = "2"
FLAGS = None
ORIG_ARGV = sys.argv
# Main notebook process calls itself with argv[1]="kernel" to start kernel
# subprocesses.
IS_KERNEL = len(sys.argv) > 1 and sys.argv[1] == "kernel"
def main(unused_argv):
sys.argv = ORIG_ARGV
if not IS_KERNEL:
# Drop all flags.
sys.argv = [sys.argv[0]]
# NOTE(sadovsky): For some reason, putting this import at the top level
# breaks inline plotting. It's probably a bug in the stone-age version of
# matplotlib.
from IPython.html.notebookapp import NotebookApp # pylint: disable=g-import-not-at-top
notebookapp = NotebookApp.instance()
notebookapp.open_browser = True
# password functionality adopted from quality/ranklab/main/tools/notebook.py
# add options to run with "password"
if FLAGS.password:
from IPython.lib import passwd # pylint: disable=g-import-not-at-top
notebookapp.ip = "0.0.0.0"
notebookapp.password = passwd(FLAGS.password)
else:
print ("\nNo password specified; Notebook server will only be available"
" on the local machine.\n")
notebookapp.initialize(argv=["--notebook-dir", FLAGS.notebook_dir])
if notebookapp.ip == "0.0.0.0":
proto = "https" if notebookapp.certfile else "http"
url = "%s://%s:%d%s" % (proto, socket.gethostname(), notebookapp.port,
notebookapp.base_project_url)
print("\nNotebook server will be publicly available at: %s\n" % url)
notebookapp.start()
return
# Drop the --flagfile flag so that notebook doesn't complain about an
# "unrecognized alias" when parsing sys.argv.
sys.argv = ([sys.argv[0]] +
[z for z in sys.argv[1:] if not z.startswith("--flagfile")])
from IPython.kernel.zmq.kernelapp import IPKernelApp # pylint: disable=g-import-not-at-top
kernelapp = IPKernelApp.instance()
kernelapp.initialize()
# Enable inline plotting. Equivalent to running "%matplotlib inline".
ipshell = kernelapp.shell
ipshell.enable_matplotlib("inline")
kernelapp.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--password",
type=str,
default=None,
help="""\
Password to require. If set, the server will allow public access. Only
used if notebook config file does not exist.\
""")
parser.add_argument(
"--notebook_dir",
type=str,
default="experimental/brain/notebooks",
help="root location where to store notebooks")
# When the user starts the main notebook process, we don't touch sys.argv.
# When the main process launches kernel subprocesses, it writes all flags
# to a tmpfile and sets --flagfile to that tmpfile, so for kernel
# subprocesses here we drop all flags *except* --flagfile, then call
# app.run(), and then (in main) restore all flags before starting the
# kernel app.
if IS_KERNEL:
# Drop everything except --flagfile.
sys.argv = ([sys.argv[0]] +
[x for x in sys.argv[1:] if x.startswith("--flagfile")])
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
ernfrid/skll | tests/test_preprocessing.py | 1 | 9383 | # License: BSD 3 clause
"""
Tests related to data preprocessing options with run_experiment.
:author: Michael Heilman ([email protected])
:author: Nitin Madnani ([email protected])
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import glob
import json
import os
import re
from io import open
from os.path import abspath, dirname, exists, join
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal, assert_almost_equal
from sklearn.feature_extraction import FeatureHasher
from sklearn.datasets.samples_generator import make_classification
from skll.data import FeatureSet, NDJWriter
from skll.experiments import run_configuration
from skll.learner import Learner, SelectByMinCount
from skll.learner import _DEFAULT_PARAM_GRIDS
from utils import fill_in_config_paths
_ALL_MODELS = list(_DEFAULT_PARAM_GRIDS.keys())
SCORE_OUTPUT_RE = re.compile(r'Objective Function Score \(Test\) = '
r'([\-\d\.]+)')
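# For illustration, SCORE_OUTPUT_RE would match a log line such as
# "Objective Function Score (Test) = 0.925" (the value shown here is made up),
# capturing the numeric score in its single group.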
_my_dir = abspath(dirname(__file__))
def setup():
"""
Create necessary directories for testing.
"""
train_dir = join(_my_dir, 'train')
if not exists(train_dir):
os.makedirs(train_dir)
test_dir = join(_my_dir, 'test')
if not exists(test_dir):
os.makedirs(test_dir)
output_dir = join(_my_dir, 'output')
if not exists(output_dir):
os.makedirs(output_dir)
def tearDown():
"""
Clean up after tests.
"""
output_dir = join(_my_dir, 'output')
config_dir = join(_my_dir, 'configs')
train_dir = join(_my_dir, 'train')
test_dir = join(_my_dir, 'test')
for output_file in glob.glob(join(output_dir, 'test_class_map_*')):
os.unlink(output_file)
if exists(join(train_dir, 'test_class_map.jsonlines')):
os.unlink(join(train_dir, 'test_class_map.jsonlines'))
if exists(join(test_dir, 'test_class_map.jsonlines')):
os.unlink(join(test_dir, 'test_class_map.jsonlines'))
config_files = ['test_class_map.cfg',
'test_class_map_feature_hasher.cfg']
for cf in config_files:
if exists(join(config_dir, cf)):
os.unlink(join(config_dir, cf))
def test_SelectByMinCount():
""" Test SelectByMinCount feature selector """
m2 = [[0.001, 0.0, 0.0, 0.0],
[0.00001, -2.0, 0.0, 0.0],
[0.001, 0.0, 0.0, 4.0],
[0.0101, -200.0, 0.0, 0.0]]
# default should keep all nonzero features (i.e. ones that appear 1+ times)
feat_selector = SelectByMinCount()
expected = np.array([[0.001, 0.0, 0.0],
[0.00001, -2.0, 0.0],
[0.001, 0.0, 4.0],
[0.0101, -200.0, 0.0]])
assert_array_equal(feat_selector.fit_transform(np.array(m2)), expected)
assert_array_equal(feat_selector.fit_transform(
sp.csr_matrix(m2)).todense(),
expected)
# keep features that happen 2+ times
feat_selector = SelectByMinCount(min_count=2)
expected = np.array([[0.001, 0.0],
[0.00001, -2.0],
[0.001, 0.0],
[0.0101, -200.0]])
assert_array_equal(feat_selector.fit_transform(np.array(m2)), expected)
assert_array_equal(
feat_selector.fit_transform(sp.csr_matrix(m2)).todense(),
expected)
# keep features that happen 3+ times
feat_selector = SelectByMinCount(min_count=3)
expected = np.array([[0.001], [0.00001], [0.001], [0.0101]])
assert_array_equal(feat_selector.fit_transform(np.array(m2)), expected)
assert_array_equal(
feat_selector.fit_transform(sp.csr_matrix(m2)).todense(),
expected)
def make_class_map_data():
# Create training file
train_path = join(_my_dir, 'train', 'test_class_map.jsonlines')
ids = []
labels = []
features = []
class_names = ['beagle', 'cat', 'dachsund', 'cat']
for i in range(1, 101):
y = class_names[i % 4]
ex_id = "{}{}".format(y, i)
# note that f1 and f5 are missing in all instances but f4 is not
x = {"f2": i + 1, "f3": i + 2, "f4": i + 5}
ids.append(ex_id)
labels.append(y)
features.append(x)
train_fs = FeatureSet('train_class_map', ids, features=features,
labels=labels)
writer = NDJWriter(train_path, train_fs)
writer.write()
# Create test file
test_path = join(_my_dir, 'test', 'test_class_map.jsonlines')
ids = []
labels = []
features = []
for i in range(1, 51):
y = class_names[i % 4]
ex_id = "{}{}".format(y, i)
# f1 and f5 are not missing in any instances here but f4 is
x = {"f1": i, "f2": i + 2, "f3": i % 10, "f5": i * 2}
ids.append(ex_id)
labels.append(y)
features.append(x)
test_fs = FeatureSet('test_class_map', ids, features=features,
labels=labels)
writer = NDJWriter(test_path, test_fs)
writer.write()
def test_class_map():
"""
Test class maps
"""
make_class_map_data()
config_template_path = join(
_my_dir,
'configs',
'test_class_map.template.cfg')
config_path = fill_in_config_paths(config_template_path)
run_configuration(config_path, quiet=True)
with open(join(_my_dir, 'output', ('test_class_map_test_class_map_Logistic'
'Regression.results.json'))) as f:
outd = json.loads(f.read())
# outstr = f.read()
# logistic_result_score = float(
# SCORE_OUTPUT_RE.search(outstr).groups()[0])
logistic_result_score = outd[0]['score']
assert_almost_equal(logistic_result_score, 0.5)
def test_class_map_feature_hasher():
"""
Test class maps with feature hashing
"""
make_class_map_data()
config_template_path = join(_my_dir, 'configs',
'test_class_map_feature_hasher.template.cfg')
config_path = fill_in_config_paths(config_template_path)
run_configuration(config_path, quiet=True)
with open(join(_my_dir, 'output', ('test_class_map_test_class_map_'
'LogisticRegression.results.'
'json'))) as f:
# outstr = f.read()
outd = json.loads(f.read())
# logistic_result_score = float(
# SCORE_OUTPUT_RE.search(outstr).groups()[0])
logistic_result_score = outd[0]['score']
assert_almost_equal(logistic_result_score, 0.5)
def make_scaling_data(use_feature_hashing=False):
X, y = make_classification(n_samples=1000, n_classes=2,
n_features=5, n_informative=5,
n_redundant=0, random_state=1234567890)
# we want to arbitrarily scale the various features to test the scaling
scalers = np.array([1, 10, 100, 1000, 10000])
X = X * scalers
# since we want to use SKLL's FeatureSet class, we need to
# create a list of IDs
ids = ['EXAMPLE_{}'.format(n) for n in range(1, 1001)]
# create a list of dictionaries as the features
feature_names = ['f{}'.format(n) for n in range(1, 6)]
features = []
for row in X:
features.append(dict(zip(feature_names, row)))
# split everything into training and testing portions
train_features, test_features = features[:800], features[800:]
train_y, test_y = y[:800], y[800:]
train_ids, test_ids = ids[:800], ids[800:]
vectorizer = FeatureHasher(n_features=4) if use_feature_hashing else None
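# (Hashing 5 original features into only 4 columns necessarily causes
# collisions, which is likely why the expected f-measures in the scaling
# checks further below differ between the hashed and unhashed cases.)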
train_fs = FeatureSet('train_scaling', train_ids,
features=train_features, labels=train_y,
vectorizer=vectorizer)
test_fs = FeatureSet('test_scaling', test_ids,
features=test_features, labels=test_y,
vectorizer=vectorizer)
return (train_fs, test_fs)
def check_scaling_features(use_feature_hashing=False, use_scaling=False):
train_fs, test_fs = make_scaling_data(use_feature_hashing=use_feature_hashing)
# create a Linear SVM with the value of scaling as specified
feature_scaling = 'both' if use_scaling else 'none'
learner = Learner('SGDClassifier', feature_scaling=feature_scaling,
pos_label_str=1)
# train the learner on the training set and test on the testing set
learner.train(train_fs)
test_output = learner.evaluate(test_fs)
fmeasures = [test_output[2][0]['F-measure'],
test_output[2][1]['F-measure']]
# these are the expected values of the f-measures, sorted
if not use_feature_hashing:
expected_fmeasures = ([0.77319587628865982, 0.78640776699029125] if
not use_scaling else
[0.94930875576036866, 0.93989071038251359])
else:
expected_fmeasures = ([0.42774566473988435, 0.5638766519823788] if
not use_scaling else
[0.87323943661971837, 0.85561497326203206])
assert_almost_equal(expected_fmeasures, fmeasures)
def test_scaling():
yield check_scaling_features, False, False
yield check_scaling_features, False, True
yield check_scaling_features, True, False
yield check_scaling_features, True, True
| bsd-3-clause |
joernhees/scikit-learn | sklearn/model_selection/tests/test_search.py | 6 | 51806 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from sklearn.externals.joblib._compat import PY3_OR_LATER
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import in1d
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge, SGDClassifier
from sklearn.model_selection.tests.common import OneTimeSplitter
# Neither of the following two estimators inherits from BaseEstimator; this
# lets us test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.classes_ = np.unique(Y)
return self
def predict(self, T):
return T.shape[0]
def transform(self, X):
return X + self.foo_param
def inverse_transform(self, X):
return X - self.foo_param
predict_proba = predict
predict_log_proba = predict
decision_function = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
assert_array_equal(grid_search.cv_results_["param_foo_param"].data,
[1, 2, 3])
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
def check_hyperparameter_searcher_with_fit_params(klass, **klass_kwargs):
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam', 'eggs'])
searcher = klass(clf, {'foo_param': [1, 2, 3]}, cv=2, **klass_kwargs)
# The CheckingClassifier generates an assertion error if
# a parameter is missing or has length != len(X).
assert_raise_message(AssertionError,
"Expected fit parameter(s) ['eggs'] not seen.",
searcher.fit, X, y, spam=np.ones(10))
assert_raise_message(AssertionError,
"Fit parameter spam has length 1; expected 4.",
searcher.fit, X, y, spam=np.ones(1),
eggs=np.zeros(10))
searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))
def test_grid_search_with_fit_params():
check_hyperparameter_searcher_with_fit_params(GridSearchCV)
def test_random_search_with_fit_params():
check_hyperparameter_searcher_with_fit_params(RandomizedSearchCV, n_iter=1)
def test_grid_search_fit_params_deprecation():
# NOTE: Remove this test in v0.21
# Use of `fit_params` in the class constructor is deprecated,
# but will still work until v0.21.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam'])
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(10)})
assert_warns(DeprecationWarning, grid_search.fit, X, y)
def test_grid_search_fit_params_two_places():
# NOTE: Remove this test in v0.21
# If users try to input fit parameters in both
# the constructor (deprecated use) and the `fit`
# method, we'll ignore the values passed to the constructor.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam'])
# The "spam" array is too short and will raise an
# error in the CheckingClassifier if used.
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(1)})
expected_warning = ('Ignoring fit_params passed as a constructor '
'argument in favor of keyword arguments to '
'the "fit" method.')
assert_warns_message(RuntimeWarning, expected_warning,
grid_search.fit, X, y, spam=np.ones(10))
# Verify that `fit` prefers its own kwargs by giving valid
# kwargs in the constructor and invalid in the method call
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(10)})
assert_raise_message(AssertionError, "Fit parameter spam has length 1",
grid_search.fit, X, y, spam=np.ones(1))
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = search_no_scoring.score(X, y)
score_accuracy = search_accuracy.score(X, y)
score_no_score_auc = search_no_score_method_auc.score(X, y)
score_auc = search_auc.score(X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_groups():
# Check if ValueError (when groups is None) propagates to GridSearchCV
# And also check if groups is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The 'groups' parameter should not be None.",
gs.fit, X, y)
gs.fit(X, y, groups=groups)
non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_classes__property():
# Test that classes_ property matches best_estimator_.classes_
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
Cs = [.1, 1, 10]
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
grid_search.fit(X, y)
assert_array_equal(grid_search.best_estimator_.classes_,
grid_search.classes_)
# Test that regressors do not have a classes_ attribute
grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
# Test that the grid searcher has no classes_ attribute before it's fit
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
assert_false(hasattr(grid_search, 'classes_'))
# Test that the grid searcher has no classes_ attribute without a refit
grid_search = GridSearchCV(LinearSVC(random_state=0),
{'C': Cs}, refit=False)
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
def test_trivial_cv_results_attr():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_no_refit():
# Test that GSCV can be used for model selection alone without refitting
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(not hasattr(grid_search, "best_estimator_") and
hasattr(grid_search, "best_index_") and
hasattr(grid_search, "best_params_"))
# Make sure the predict/transform etc. functions raise meaningful error messages
for fn_name in ('predict', 'predict_proba', 'predict_log_proba',
'transform', 'inverse_transform'):
assert_raise_message(NotFittedError,
('refit=False. %s is available only after '
'refitting on the best parameters' % fn_name),
getattr(grid_search, fn_name), X)
def test_grid_search_error():
# Test that grid search will capture errors on data with different length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_when_param_grid_includes_range():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = None
if PY3_OR_LATER:
grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)})
else:
grid_search = GridSearchCV(clf, {'foo_param': xrange(1, 4)})
grid_search.fit(X, y)
assert_equal(grid_search.best_estimator_.foo_param, 2)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a non-empty sequence.",
GridSearchCV, clf, param_dict)
param_dict = {"C": "1,2,3"}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass n-dimensional arrays (4-d X, 3-d y) to GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
@ignore_warnings
def test_pandas_input():
# check that GridSearchCV doesn't destroy the pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "cv_results_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='fowlkes_mallows_score')
grid_search.fit(X, y)
# So can FMS ;)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def check_cv_results_array_types(cv_results, param_keys, score_keys):
# Check that the search's `cv_results_` arrays are of the correct types
assert_true(all(isinstance(cv_results[param], np.ma.MaskedArray)
for param in param_keys))
assert_true(all(cv_results[key].dtype == object for key in param_keys))
assert_false(any(isinstance(cv_results[key], np.ma.MaskedArray)
for key in score_keys))
assert_true(all(cv_results[key].dtype == np.float64
for key in score_keys if not key.startswith('rank')))
assert_true(cv_results['rank_test_score'].dtype == np.int32)
def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
# Test the search.cv_results_ contains all the required results
assert_array_equal(sorted(cv_results.keys()),
sorted(param_keys + score_keys + ('params',)))
assert_true(all(cv_results[key].shape == (n_cand,)
for key in param_keys + score_keys))
def check_cv_results_grid_scores_consistency(search):
# TODO Remove in 0.20
cv_results = search.cv_results_
res_scores = np.vstack(list([cv_results["split%d_test_score" % i]
for i in range(search.n_splits_)])).T
res_means = cv_results["mean_test_score"]
res_params = cv_results["params"]
n_cand = len(res_params)
grid_scores = assert_warns(DeprecationWarning, getattr,
search, 'grid_scores_')
assert_equal(len(grid_scores), n_cand)
# Check consistency of the structure of grid_scores
for i in range(n_cand):
assert_equal(grid_scores[i].parameters, res_params[i])
assert_array_equal(grid_scores[i].cv_validation_scores,
res_scores[i, :])
assert_array_equal(grid_scores[i].mean_validation_score, res_means[i])
def test_grid_search_cv_results():
X, y = make_classification(n_samples=50, n_features=4,
random_state=42)
n_splits = 3
n_grid_points = 6
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
grid_search = GridSearchCV(SVC(), cv=n_splits, iid=False,
param_grid=params)
grid_search.fit(X, y)
grid_search_iid = GridSearchCV(SVC(), cv=n_splits, iid=True,
param_grid=params)
grid_search_iid.fit(X, y)
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_candidates = n_grid_points
for search, iid in zip((grid_search, grid_search_iid), (False, True)):
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check if score and timing are reasonable
assert_true(all(cv_results['rank_test_score'] >= 1))
assert_true(all(np.all(cv_results[k] >= 0) for k in score_keys
if k != 'rank_test_score'))
assert_true(all(np.all(cv_results[k] <= 1) for k in score_keys
if 'time' not in k and
k != 'rank_test_score'))
# Check cv_results structure
check_cv_results_array_types(cv_results, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
# Check masking
cv_results = grid_search.cv_results_
n_candidates = len(grid_search.cv_results_['params'])
assert_true(all((cv_results['param_C'].mask[i] and
cv_results['param_gamma'].mask[i] and
not cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'linear'))
assert_true(all((not cv_results['param_C'].mask[i] and
not cv_results['param_gamma'].mask[i] and
cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'rbf'))
check_cv_results_grid_scores_consistency(search)
def test_random_search_cv_results():
# Make a dataset with a lot of noise to get various kinds of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# scipy.stats dists now support `seed` but we still support scipy 0.12
# which doesn't support the seed. Hence the assertions in the test for
# random_search alone should not depend on randomization.
n_splits = 3
n_search_iter = 30
params = dict(C=expon(scale=10), gamma=expon(scale=0.1))
random_search = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits, iid=False,
param_distributions=params)
random_search.fit(X, y)
random_search_iid = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits, iid=True,
param_distributions=params)
random_search_iid.fit(X, y)
param_keys = ('param_C', 'param_gamma')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_cand = n_search_iter
for search, iid in zip((random_search, random_search_iid), (False, True)):
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check results structure
check_cv_results_array_types(cv_results, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
# For random_search, all the param array vals should be unmasked
assert_false(any(cv_results['param_C'].mask) or
any(cv_results['param_gamma'].mask))
check_cv_results_grid_scores_consistency(search)
def test_search_iid_param():
# Test the IID parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(SVC(), param_grid={'C': [1, 10]}, cv=cv)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv)
for search in (grid_search, random_search):
search.fit(X, y)
assert_true(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s_i][0]
for s_i in range(search.n_splits_)))
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s_i][0]
for s_i in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s_i][0]
for s_i in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
# Test the first candidate
assert_equal(search.cv_results_['param_C'][0], 1)
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
assert_array_almost_equal(train_cv_scores, [1, 1])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average and weighted std
expected_test_mean = 1 * 1. / 4. + 1. / 3. * 3. / 4.
expected_test_std = np.sqrt(1. / 4 * (expected_test_mean - 1) ** 2 +
3. / 4 * (expected_test_mean - 1. / 3.) **
2)
assert_almost_equal(test_mean, expected_test_mean)
assert_almost_equal(test_std, expected_test_std)
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
# once with iid=False
grid_search = GridSearchCV(SVC(),
param_grid={'C': [1, 10]},
cv=cv, iid=False)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv, iid=False)
for search in (grid_search, random_search):
search.fit(X, y)
assert_false(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s][0]
for s in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s][0]
for s in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
assert_equal(search.cv_results_['param_C'][0], 1)
# scores are the same as above
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
# Unweighted mean/std is used
assert_almost_equal(test_mean, np.mean(test_cv_scores))
assert_almost_equal(test_std, np.std(test_cv_scores))
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
def test_search_cv_results_rank_tie_breaking():
X, y = make_blobs(n_samples=50, random_state=42)
# The two C values are close enough to give similar models
# which would result in a tie of their mean cv-scores
param_grid = {'C': [1, 1.001, 0.001]}
grid_search = GridSearchCV(SVC(), param_grid=param_grid)
random_search = RandomizedSearchCV(SVC(), n_iter=3,
param_distributions=param_grid)
for search in (grid_search, random_search):
search.fit(X, y)
cv_results = search.cv_results_
# Check tie breaking strategy -
# Check that there is a tie in the mean scores between
# candidates 1 and 2 alone
assert_almost_equal(cv_results['mean_test_score'][0],
cv_results['mean_test_score'][1])
assert_almost_equal(cv_results['mean_train_score'][0],
cv_results['mean_train_score'][1])
try:
assert_almost_equal(cv_results['mean_test_score'][1],
cv_results['mean_test_score'][2])
except AssertionError:
pass
try:
assert_almost_equal(cv_results['mean_train_score'][1],
cv_results['mean_train_score'][2])
except AssertionError:
pass
# 'min' rank should be assigned to the tied candidates
assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3])
def test_search_cv_results_none_param():
X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1]
estimators = (DecisionTreeRegressor(), DecisionTreeClassifier())
est_parameters = {"random_state": [0, None]}
cv = KFold(random_state=0)
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv).fit(X, y)
assert_array_equal(grid_search.cv_results_['param_random_state'],
[0, None])
@ignore_warnings()
def test_search_cv_timing():
svc = LinearSVC(random_state=0)
X = [[1, ], [2, ], [3, ], [4, ]]
y = [0, 1, 1, 0]
gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0)
rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)
for search in (gs, rs):
search.fit(X, y)
for key in ['mean_fit_time', 'std_fit_time']:
# NOTE The precision of time.time on Windows is not high
# enough for the fit/score times to be non-zero for trivial X and y
assert_true(np.all(search.cv_results_[key] >= 0))
assert_true(np.all(search.cv_results_[key] < 1))
for key in ['mean_score_time', 'std_score_time']:
assert_true(search.cv_results_[key][1] >= 0)
assert_true(search.cv_results_[key][0] == 0.0)
assert_true(np.all(search.cv_results_[key] < 1))
def test_grid_search_correct_score_results():
# test that correct scores are used
n_splits = 3
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits)
cv_results = grid_search.fit(X, y).cv_results_
# Test scorer names
result_keys = list(cv_results.keys())
expected_keys = (("mean_test_score", "rank_test_score") +
tuple("split%d_test_score" % cv_i
for cv_i in range(n_splits)))
assert_true(all(in1d(expected_keys, result_keys)))
cv = StratifiedKFold(n_splits=n_splits)
n_splits = grid_search.n_splits_
for candidate_i, C in enumerate(Cs):
clf.set_params(C=C)
cv_scores = np.array(
list(grid_search.cv_results_['split%d_test_score'
% s][candidate_i]
for s in range(n_splits)))
for i, (train, test) in enumerate(cv.split(X, y)):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, cv_scores[i])
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
grid_search_pickled = pickle.loads(pickle.dumps(grid_search))
assert_array_almost_equal(grid_search.predict(X),
grid_search_pickled.predict(X))
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
random_search_pickled = pickle.loads(pickle.dumps(random_search))
assert_array_almost_equal(random_search.predict(X),
random_search_pickled.predict(X))
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
res_params = grid_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
grid_search.cv_results_['split%d_test_score' % i][cand_i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
res_params = random_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
random_search.cv_results_['split%d_test_score'
% i][cand_i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
def get_cand_scores(i):
return np.array(list(gs.cv_results_['split%d_test_score' % s][i]
for s in range(gs.n_splits_)))
assert all((np.all(get_cand_scores(cand_i) == 0.0)
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER))
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
assert all(np.all(np.isnan(get_cand_scores(cand_i)))
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
def test_stochastic_gradient_loss_param():
# Make sure the predict_proba works when loss is specified
# as one of the parameters in the param_grid.
param_grid = {
'loss': ['log'],
}
X = np.arange(24).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid)
# When the estimator is not fitted, `predict_proba` is not available as the
# loss is 'hinge'.
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
clf.predict_proba(X)
clf.predict_log_proba(X)
# Make sure `predict_proba` is not available when setting loss=['hinge']
# in param_grid
param_grid = {
'loss': ['hinge'],
}
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid)
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
assert_false(hasattr(clf, "predict_proba"))
def test_search_train_scores_set_to_false():
X = np.arange(6).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = LinearSVC(random_state=0)
gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]},
return_train_score=False)
gs.fit(X, y)
def test_grid_search_cv_splits_consistency():
# Check if a one time iterable is accepted as a cv parameter.
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=n_samples, random_state=0)
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples))
gs.fit(X, y)
gs2 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits))
gs2.fit(X, y)
def _pop_time_keys(cv_results):
for key in ('mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time'):
cv_results.pop(key)
return cv_results
# OneTimeSplitter is a non-re-entrant cv where split can be called only
# once. If ``cv.split`` were called once per param setting in GridSearchCV.fit,
# the 2nd and 3rd parameters would not be evaluated, as no train/test indices
# would be generated for the 2nd and subsequent cv.split calls.
# This is a check to make sure cv.split is not called once per param
# setting.
np.testing.assert_equal(_pop_time_keys(gs.cv_results_),
_pop_time_keys(gs2.cv_results_))
# Check consistency of folds across the parameters
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.1, 0.2, 0.2]},
cv=KFold(n_splits=n_splits, shuffle=True))
gs.fit(X, y)
# As the first two param settings (C=0.1) and the next two param
# settings (C=0.2) are the same, the test and train scores must also be
# the same as long as the same train/test indices are generated for all
# the cv splits, for both param settings.
for score_type in ('train', 'test'):
per_param_scores = {}
for param_i in range(4):
per_param_scores[param_i] = list(
gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i]
for s in range(5))
assert_array_almost_equal(per_param_scores[0],
per_param_scores[1])
assert_array_almost_equal(per_param_scores[2],
per_param_scores[3])
def test_transform_inverse_transform_round_trip():
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
grid_search.fit(X, y)
X_round_trip = grid_search.inverse_transform(grid_search.transform(X))
assert_array_equal(X, X_round_trip)
| bsd-3-clause |
beepee14/scikit-learn | examples/ensemble/plot_random_forest_embedding.py | 286 | 3531 | """
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Neighboring points often share the same leaf of a tree and therefore
share large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of the
transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forest learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
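# X_transformed is a sparse binary indicator matrix with one column per leaf
# of every tree, so neighboring points activate many of the same columns.
# Illustrative check (the exact column count depends on the fitted trees,
# at most n_estimators * 2**max_depth = 80 here):
#   print(X_transformed.shape)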
# Visualize result after dimensionality reduction using truncated SVD
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
| bsd-3-clause |
kazemakase/scikit-learn | examples/decomposition/plot_incremental_pca.py | 244 | 1878 | """
===============
Incremental PCA
===============
Incremental principal component analysis (IPCA) is typically used as a
replacement for principal component analysis (PCA) when the dataset to be
decomposed is too large to fit in memory. IPCA builds a low-rank approximation
for the input data using an amount of memory which is independent of the
number of input data samples. It is still dependent on the input data features,
but changing the batch size allows for control of memory usage.
This example serves as a visual check that IPCA is able to find a similar
projection of the data to PCA (up to a sign flip), while only processing a
few samples at a time. This can be considered a "toy example", as IPCA is
intended for large datasets which do not fit in main memory, requiring
incremental approaches.
"""
print(__doc__)
# Authors: Kyle Kastner
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA, IncrementalPCA
iris = load_iris()
X = iris.data
y = iris.target
n_components = 2
ipca = IncrementalPCA(n_components=n_components, batch_size=10)
X_ipca = ipca.fit_transform(X)
pca = PCA(n_components=n_components)
X_pca = pca.fit_transform(X)
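# IncrementalPCA can also be fed explicitly in mini-batches via partial_fit,
# which is how one would handle data that does not fit in memory. A minimal
# sketch (illustrative only; the 10-batch split is an arbitrary choice):
#   ipca_stream = IncrementalPCA(n_components=n_components)
#   for batch in np.array_split(X, 10):
#       ipca_stream.partial_fit(batch)
#   X_ipca_stream = ipca_stream.transform(X)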
for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]:
plt.figure(figsize=(8, 8))
for c, i, target_name in zip("rgb", [0, 1, 2], iris.target_names):
plt.scatter(X_transformed[y == i, 0], X_transformed[y == i, 1],
c=c, label=target_name)
if "Incremental" in title:
err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean()
plt.title(title + " of iris dataset\nMean absolute unsigned error "
"%.6f" % err)
else:
plt.title(title + " of iris dataset")
plt.legend(loc="best")
plt.axis([-4, 4, -1.5, 1.5])
plt.show()
| bsd-3-clause |
droundy/deft | papers/histogram/figs/plot-sticky-wall.py | 1 | 2698 | #!/usr/bin/python2
import matplotlib, sys
if 'show' not in sys.argv:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
matplotlib.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
matplotlib.rc('text', usetex=True)
import readnew
if len(sys.argv) < 5:
print(("Usage: python {} 1.3 0.22 100 10".format(sys.argv[0])))
exit(1)
ww = float(sys.argv[1])
#arg ww = [1.3]
ff = float(sys.argv[2])
#arg ff = [0.1, 0.2, 0.3]
lenx = float(sys.argv[3])
#arg lenx = [50, 80, 100]
lenyz = float(sys.argv[4])
#arg lenyz = [10]
plt.figure()
Ts = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 10.0]
colors = { 0.1: 'r',
0.5: 'k',
0.6: 'y',
0.7: 'm',
0.8: 'g',
0.9: 'b',
1.0: 'r',
5.0: 'k',
10.0: 'c',
}
def color(T):
try:
return colors[T]
except:
return ''
lines = ['-', '--', ':', '-.', '.']
first_method = True
the_first_method = ''
methods = [ '-tmi3', '-toe3', '-tmmc', '-satmmc', '-sad']#, '-wltmmc-0.8-1e-10'] #, '-tmi']
first_temperature = [True]*len(methods)
for i in range(len(methods)):
method = methods[i]
fbase = 'data/lv/ww%.2f-ff%.2f-%gx%g%s' % (ww, ff, lenx, lenyz, method)
fname = fbase + '-density.dat'
try:
minT = readnew.minT(fname)
convergedT = readnew.convergedT(fname)
for T in Ts:
if T >= minT and T >= convergedT*1.0:
density, x = readnew.density_x(fbase, T)
plt.plot(x/2, density, color(T)+lines[i])
if first_method or method == the_first_method:
if first_temperature[i]:
plt.plot(x/2, density, color(T)+lines[i],
label='T=%g %s (converged to %.2g)' % (T, method[1:], convergedT))
first_temperature[i] = False
else:
plt.plot(x/2, density, color(T)+lines[i], label='T=%g' % T)
the_first_method = method
first_method = False
elif first_temperature[i]:
plt.plot(x/2, density, color(T)+lines[i],
label='T=%g %s (converged to %.2g)' % (T, method[1:], convergedT))
first_temperature[i] = False
else:
plt.plot(x/2, density, color(T)+lines[i])
except:
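        # data files for this method may be missing or malformed; skip them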
pass
plt.ylim(0)
plt.xlabel(r'$z/\sigma$')
plt.ylabel(r'$\eta$')
plt.legend(loc='best')
plt.title(r'$\eta(z)$ with $\lambda = %g$ and $\eta=%g$' % (ww, ff))
plt.savefig('figs/sticky-wall-ww%.2f-ff%.2f-%gx%g.pdf' % (ww, ff, lenx, lenyz))
plt.show()
| gpl-2.0 |
google/dl_bounds | dl_bounds/src/results.py | 1 | 30995 | # coding=utf-8
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements visualization of exprimental results."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from dl_bounds.src.pysqlite_ext import SqliteDB
from matplotlib import pyplot as plt
import numpy as np
import tensorflow as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_enum("do", "plot_hessian_vs_margin",
["plot_hessian_vs_margin", "table_norms", "table_phased"],
"Type of result to plot.")
tf.flags.DEFINE_string("db", None, "Database location.")
tf.flags.DEFINE_string("dataset", None, "Dataset.")
tf.flags.DEFINE_string("network", None, "Network.")
tf.flags.DEFINE_string("groupby_param", None, "Parameter name.")
tf.flags.DEFINE_string("pdf", None, "PDF filename to plot to.")
tf.flags.DEFINE_boolean("show", False, "Show plot.")
tf.flags.mark_flag_as_required("do")
tf.flags.mark_flag_as_required("db")
tf.flags.mark_flag_as_required("dataset")
tf.flags.mark_flag_as_required("network")
tf.flags.mark_flag_as_required("groupby_param")
class Results(object):
"""Retrieves results from Sqlite database."""
def __init__(self, db_filename, mean_metrics=False):
"""Constructor.
Args:
db_filename: path to sqlite3 database.
mean_metrics: return means of metrics over groupby parameter,
passed to getters.
"""
self.db_filename = db_filename
self.common_metrics = [
"train_error",
"val_error",
"train_zero_one_error",
"val_zero_one_error",
]
self.norm_metrics = [
"weight_l2_norms", "path_l2_norms", "spectral_products",
"spectral_complexities", "cond_numbers",
"ratios_of_mean_deep_embeddings", "ratios_of_mean_shallow_embeddings",
"soft_margins", "weight_variance", "weight_entropy",
"train_grad_norm", "val_grad_norm",
]
self.mean_metrics = mean_metrics
@staticmethod
def records_to_dict(rs, records, name):
rs[name] = np.vstack([rec[name] for rec in records])
def extract_metrics(self, rs, records, param_names):
for name in param_names:
Results.records_to_dict(rs, records, name)
def extract_common_metrics(self, rs, records):
self.extract_metrics(rs, records, self.common_metrics)
if records:
rs["pass_numbers"] = records[0]["pass_numbers"]
def extract_margins(self, rs, records, margin_eps_index=-1):
if records:
if records[0]["soft_margins"].ndim == 3:
rs["sq_margin"] = np.vstack(
[rec["soft_margins"][:, :, margin_eps_index]**2 for rec in records])
elif records[0]["soft_margins"].ndim == 2:
rs["sq_margin"] = np.vstack(
[rec["soft_margins"][:, margin_eps_index]**2 for rec in records])
def get_metrics(self,
dataset,
groupby_param_name,
metrics,
bad_min,
extra_constraints=""):
"""Retrieves list of records, where each record is a dict.
Args:
dataset: name of a dataset.
groupby_param_name: parameter to group results by.
metrics: list of metric names to retrieve.
      bad_min: [bool] retrieve results for "bad" minimum experiment.
extra_constraints: extra "WHERE" constraints.
Returns:
list of dicts, where each dict stands for a record.
"""
metrics.extend(self.common_metrics)
if self.mean_metrics:
tf.logging.info("Retrieving means of metrics.")
select_metrics = ["mean(%s, 0) as \"%s\"" % (s, s) for s in metrics]
select_metrics.extend(
["stddev(%s, 0) as \"stddev_%s\"" % (s, s) for s in metrics])
else:
select_metrics = metrics
args = dict(
dataset=dataset,
groupby_param_name=groupby_param_name,
metrics=", ".join(select_metrics),
metric_constraint=" and ".join(["%s is not null" % m for m in metrics]),
bad_min=int(bad_min),
extra_constraints=extra_constraints)
db = SqliteDB(os.path.join(self.db_filename))
if groupby_param_name in ["width", "depth"]:
args["groupby_param_name"] = "network"
sql_query = """
SELECT pass_numbers,
%(groupby_param_name)s,
%(metrics)s
FROM rs
WHERE dataset = "%(dataset)s"
AND bad_min = %(bad_min)s
AND %(metric_constraint)s
%(extra_constraints)s
GROUP by %(groupby_param_name)s
ORDER by %(groupby_param_name)s
""" % args
tf.logging.info(sql_query)
rs = db.execute(sql_query)
    # Handle the width and depth parameters (for MLP) in a special way,
    # i.e. parse the network name and convert it into an integer column.
if groupby_param_name == "width":
for (i, rec) in enumerate(rs):
layer_widths = rec["network"].split("_")[1].split("-")
assert len(layer_widths) == 2
assert layer_widths[0] == layer_widths[1]
rs[i]["width"] = int(layer_widths[0])
rs.sort(key=lambda x: x["width"])
elif groupby_param_name == "depth":
for (i, rec) in enumerate(rs):
layer_widths = rec["network"].split("_")[1].split("-")
rs[i]["depth"] = int(len(layer_widths))
rs.sort(key=lambda x: x["depth"])
return rs
def get_sharpness(self, dataset, groupby_param_name, bad_min,
extra_constraints):
"""Get sharpness records.
Retrieves records with common metrics (e.g. training/testing error)
    and the sharpness metric.
Args:
dataset: name of a dataset.
groupby_param_name: parameter to group results by.
      bad_min: [bool] retrieve results for "bad" minimum experiment.
extra_constraints: extra "WHERE" constraints.
Returns:
list of dicts, where each dict stands for a record.
"""
rs = dict()
records = self.get_metrics(dataset, groupby_param_name,
["sharpness", "alpha"], bad_min,
extra_constraints)
for rec in records:
alphas = rec["alpha"]
if alphas.ndim == 2:
alphas = alphas[0, :]
if records[0]["sharpness"].ndim == 3:
for i in range(len(alphas)):
rs["sharpness_%s" % alphas[i]] = np.vstack(
[rec["sharpness"][:, :, i].squeeze() for rec in records])
elif records[0]["sharpness"].ndim == 2:
for i in range(len(alphas)):
rs["sharpness_%s" % alphas[i]] = np.vstack(
[rec["sharpness"][:, i].squeeze() for rec in records])
return rs
def get_all_metrics(self,
dataset,
groupby_param_name,
bad_min=False,
extra_constraints=""):
"""Get records for all the metrics.
Args:
dataset: name of a dataset.
groupby_param_name: parameter to group results by.
      bad_min: [bool] retrieve results for "bad" minimum experiment.
extra_constraints: extra "WHERE" constraints.
Returns:
list of dicts, where each dict stands for a record.
"""
# Pulling norm-metrics
records = self.get_metrics(dataset, groupby_param_name, self.norm_metrics,
bad_min, extra_constraints)
rs = dict()
self.extract_common_metrics(rs, records)
self.extract_metrics(rs, records, self.norm_metrics)
self.extract_metrics(rs, records, [groupby_param_name])
self.extract_margins(rs, records)
# Pulling sharpness
sharpness_rs = self.get_sharpness(dataset, groupby_param_name, bad_min,
extra_constraints)
rs.update(sharpness_rs)
# Pulling Hessian spectral norm
hessian_records = self.get_metrics(dataset, groupby_param_name,
["hessian_top_sv_means"], bad_min,
extra_constraints)
self.extract_metrics(rs, hessian_records, ["hessian_top_sv_means"])
return rs
def get_hessian(self,
dataset,
groupby_param_name,
bad_min=False,
extra_constraints=""):
"""Get Hessian spectral norm records.
Retrieves records with common metrics (e.g. training/testing error)
and the Hessian spectral norm metric.
Args:
dataset: name of a dataset.
groupby_param_name: parameter to group results by.
      bad_min: [bool] retrieve results for "bad" minimum experiment.
extra_constraints: extra "WHERE" constraints.
Returns:
list of dicts, where each dict stands for a record.
"""
records = self.get_metrics(
dataset, groupby_param_name,
["hessian_top_sv_means", "soft_margins", "train_grad_norm"], bad_min,
extra_constraints)
rs = dict()
self.extract_common_metrics(rs, records)
self.extract_metrics(rs, records, ["hessian_top_sv_means",
"stddev_hessian_top_sv_means",
"train_grad_norm"])
self.extract_metrics(rs, records, [groupby_param_name])
self.extract_margins(rs, records)
return rs
class MetricTable(object):
"""Implements conversion of metric results to a LaTeX table."""
def __init__(self,
db_filename,
dataset,
groupby_param_name,
network,
extra_constraints=""):
"""Constructor.
Args:
db_filename: path to sqlite3 database.
dataset: name of a dataset.
groupby_param_name: parameter to group results by.
network: network name.
extra_constraints: extra "WHERE" constraints.
"""
rs = Results(db_filename)
extra_constraints_sql = ("and network like '%s' %s " % (network,
extra_constraints))
self.records_good_min = rs.get_all_metrics(
dataset,
groupby_param_name,
bad_min=False,
extra_constraints=extra_constraints_sql)
self.records_bad_min = rs.get_all_metrics(
dataset,
groupby_param_name,
bad_min=True,
extra_constraints=extra_constraints_sql)
self.n_params = self.records_good_min["sq_margin"].shape[0]
@staticmethod
def format_number(num):
"""Formats a float.
Args:
num: float value.
Returns:
if num is in [1e-2, 1e+3), returns a float with 10^-2 precision.
Otherwise returns value in scientific format.
"""
if 1e-2 <= num < 1e+3:
return "$%.2f$" % num
elif num == 0.0:
return "$0$"
else:
base, exponent = ("%.1e" % num).split("e")
return "$%s \\cdot 10^{%s}$" % (base, int(exponent))
@staticmethod
def extract_column(records_good_min_metric,
records_bad_min_metric,
records_good_min_metric_stddev=None,
records_bad_min_metric_stddev=None,
bold_col=True):
"""Formats a column of a LaTeX table.
    Given a numpy array of records corresponding to a good-minimum experiment,
and a bad one, formats these into two adjacent columns.
Highlights minimal and maximal value in the "bad" column.
Args:
records_good_min_metric: numpy array of values from a "good" experiment.
records_bad_min_metric: numpy array of values from a "bad" experiment.
records_good_min_metric_stddev: stddev of "good" experiment.
records_bad_min_metric_stddev: stddev of "bad" experiment.
      bold_col: if True, bolden the max. and min. values in the "bad" column;
        otherwise, bolden the larger value in each good/bad pair.
Returns:
a string in LaTeX format.
"""
min_i_bad_min_metric = np.argmin(records_bad_min_metric)
max_i_bad_min_metric = np.argmax(records_bad_min_metric)
column = []
for (i, z) in enumerate(
zip(records_good_min_metric, records_bad_min_metric)):
      zs = list(map(MetricTable.format_number, z))
if records_bad_min_metric_stddev:
z_stddev = (
MetricTable.format_number(records_good_min_metric_stddev[i]),
MetricTable.format_number(records_bad_min_metric_stddev[i])
)
zs[0] = "%s $\\pm$ %s" % (zs[0], z_stddev[0])
zs[1] = "%s $\\pm$ %s" % (zs[1], z_stddev[1])
if bold_col:
if min_i_bad_min_metric == i or max_i_bad_min_metric == i:
column.append("%s & \\boldmath{%s}" % zs)
else:
column.append("%s & %s" % tuple(map(MetricTable.format_number, z)))
else:
if z[0] > z[1]:
column.append("\\boldmath{%s} & %s" % tuple(zs))
elif z[0] < z[1]:
column.append("%s & \\boldmath{%s}" % tuple(zs))
else:
column.append("%s & %s" % tuple(zs))
return column
@staticmethod
def format_good_bad_table(corner_label, col_labels, row_labels,
rows, print_full_doc):
"""Formats a table with every column split for "good" and "bad" metric.
Args:
corner_label: a label of the top left corner.
col_labels: column labels.
row_labels: row labels.
rows: row content, must be 2 * # of columns.
print_full_doc: format full LaTeX doc., ready to compilation.
Returns:
LaTeX formatted string.
"""
n_cols = len(col_labels)
table_lines = []
if print_full_doc:
table_lines.append(r"\documentclass{article}")
table_lines.append(
r"\usepackage[a4paper, landscape, margin=2mm]{geometry}")
table_lines.append(
r"\usepackage{amsmath,amssymb,amsfonts,amsthm,graphics}")
table_lines.append(r"\begin{document}")
table_lines.append(r"\begin{center}")
table_lines.append(r"\begin{table}")
table_lines.append(r"\scalebox{0.6}{")
table_lines.append(r"\begin{tabular}{%s|}" % ("|l" *
(2 * (n_cols) + 1)))
heads = ([corner_label] + [
r"\multicolumn{2}{|p{3cm}|}{%s}" % col_label
for col_label in col_labels
])
table_lines.append(r"\hline")
table_lines.append(" & ".join(heads) + r" \\")
table_lines.append(r"\hline")
table_lines.append(" & ".join([""] + ["Good", "Bad"] *
(n_cols)) + r"\\ ")
table_lines.append(r"\hline")
table_lines.append("\n".join([
" & ".join([row_labels[i]] + list(row)) + r" \\" + "\n\\hline"
for (i, row) in enumerate(rows)
]))
table_lines.append(r"\end{tabular}")
table_lines.append(r"}")
table_lines.append(r"\end{table}")
if print_full_doc:
table_lines.append(r"\end{center}")
table_lines.append(r"\end{document}")
return "\n".join(table_lines)
def print(self, metrics, normalize_by_margin=False, print_full_doc=False):
"""Formats a latex table for a given set of metrics.
Args:
metrics: list of metric names.
normalize_by_margin: normalize metrics by the squared soft margin.
print_full_doc: wrap LaTeX table into the markup ready for compilation.
Returns:
a table formatted as a LaTeX string.
"""
pass_numbers = self.records_good_min["pass_numbers"]
columns = []
good_sq_soft_margin = self.records_good_min["sq_margin"].squeeze()
bad_sq_soft_margin = self.records_bad_min["sq_margin"].squeeze()
# Subselect index is passed whenever one record is a vector
# e.g. eigenvalues of all layers
for (metric_name, metric_label, subselect_index) in metrics:
records_good_min_metric = self.records_good_min[metric_name].squeeze()
records_bad_min_metric = self.records_bad_min[metric_name].squeeze()
# Backwards compatibility
# older experiments recorded multiple "bad" minima snapshots
# here we are keeping only the last one
if records_bad_min_metric.ndim == 2:
records_bad_min_metric = records_bad_min_metric[-1, :]
if subselect_index:
records_good_min_metric = records_good_min_metric[:, subselect_index]
records_bad_min_metric = records_bad_min_metric[:, subselect_index]
if normalize_by_margin and (metric_name != "sq_margin"):
records_good_min_metric /= good_sq_soft_margin
records_bad_min_metric /= bad_sq_soft_margin
column = MetricTable.extract_column(records_good_min_metric,
records_bad_min_metric)
columns.append(column)
val_error_good = self.records_good_min["val_error"].squeeze()
val_error_bad = self.records_bad_min["val_error"].squeeze()
train_error_good = self.records_good_min["train_error"].squeeze()
train_error_bad = self.records_bad_min["train_error"].squeeze()
val_zero_one_error_good = (
self.records_good_min["val_zero_one_error"].squeeze())
val_zero_one_error_bad = (
self.records_bad_min["val_zero_one_error"].squeeze())
train_zero_one_error_good = (
self.records_good_min["train_zero_one_error"].squeeze())
train_zero_one_error_bad = (
self.records_bad_min["train_zero_one_error"].squeeze())
# Backwards compatibility again
if val_error_bad.ndim == 2:
val_error_bad = val_error_bad[-1, :]
train_error_bad = train_error_bad[-1, :]
val_zero_one_error_bad = val_zero_one_error_bad[-1, :]
train_zero_one_error_bad = train_zero_one_error_bad[-1, :]
error_metrics_all = [[(train_error_bad, train_error_good, "Train error"),
(val_error_bad, val_error_good, "Val error")],
[(train_zero_one_error_bad, train_zero_one_error_good,
"Train error (0/1)"),
(val_zero_one_error_bad, val_zero_one_error_good,
"Val error (0/1)")]]
error_labels = []
for (i, error_metrics) in enumerate(error_metrics_all):
for (metric_bad, metric_good, label) in error_metrics:
column = MetricTable.extract_column(metric_good, metric_bad)
columns.append(column)
error_labels.append(label)
rows = zip(*columns)
table_lines = []
if print_full_doc:
table_lines.append(r"\documentclass{article}")
table_lines.append(
r"\usepackage[a4paper, landscape, margin=2mm]{geometry}")
table_lines.append(
r"\usepackage{amsmath,amssymb,amsfonts,amsthm,graphics}")
table_lines.append(r"\begin{document}")
table_lines.append(r"\begin{center}")
table_lines.append(r"\begin{table}")
table_lines.append(r"\scalebox{0.6}{")
table_lines.append(r"\begin{tabular}{%s|}" % ("|l" *
(2 * (len(metrics) + 4) + 1)))
heads = (["Epoch"] + [
r"\multicolumn{2}{|p{3cm}|}{%s}" % metric_label
for (_, metric_label, _) in metrics
] + [
r"\multicolumn{2}{|p{3cm}|}{%s}" % error_label
for error_label in error_labels
])
table_lines.append(r"\hline")
table_lines.append(" & ".join(heads) + r" \\")
table_lines.append(r"\hline")
table_lines.append(" & ".join([""] + ["Good", "Bad"] *
(len(metrics) + 4)) + r"\\ ")
table_lines.append(r"\hline")
table_lines.append("\n".join([
" & ".join([str(pass_numbers[i])] + list(row)) + r" \\" + "\n\\hline"
for (i, row) in enumerate(rows)
]))
table_lines.append(r"\end{tabular}")
table_lines.append(r"}")
table_lines.append(r"\end{table}")
if print_full_doc:
table_lines.append(r"\end{center}")
table_lines.append(r"\end{document}")
return "\n".join(table_lines)
class MetricVsParamTable(object):
def __init__(self, db_filename, dataset, network, groupby_param_name):
    """Constructor.
    Args:
      db_filename: path to sqlite3 database.
      dataset: dataset name.
      network: network name.
      groupby_param_name: parameter to group results by.
    """
    rs = Results(db_filename, mean_metrics=True)
self.records_good_min = rs.get_all_metrics(
dataset,
groupby_param_name,
bad_min=False,
extra_constraints="and network like '%s' " % network)
self.records_bad_min = rs.get_all_metrics(
dataset,
groupby_param_name,
bad_min=True,
extra_constraints="and network like '%s' " % network)
self.groupby_param_name = groupby_param_name
self.n_params = self.records_good_min["sq_margin"].shape[0]
def print(self, print_full_doc):
metrics = [
("weight_l2_norms", "Weight L2 norm"),
("path_l2_norms", "Path L2 norm"),
("spectral_products", "Lip. const of the network"),
("spectral_complexities", "Spectral complexity"),
("hessian_top_sv_means", "Hessian spectral norm"),
("sharpness_0.0005", "Sharpness\\newline (alpha=0.0005)"),
("train_grad_norm", "Train grad. norm"),
("train_error", "Train error"),
("val_error", "Val. error"),
("train_zero_one_error", "Train (0/1) error"),
("val_zero_one_error", "Val. (0/1) error")
]
columns = []
row_labels = [m[1] for m in metrics]
col_labels = [x[0] for x in self.records_good_min[self.groupby_param_name]]
for param_index in range(self.n_params):
col_metric_values_good = []
col_metric_values_bad = []
for (metric_name, _) in metrics:
metric_values_good = self.records_good_min[metric_name][param_index, -1]
metric_values_bad = self.records_bad_min[metric_name][param_index, -1]
col_metric_values_good.append(metric_values_good)
col_metric_values_bad.append(metric_values_bad)
column = MetricTable.extract_column(np.array(col_metric_values_good),
np.array(col_metric_values_bad),
bold_col=False)
columns.append(column)
rows = zip(*columns)
table_text = MetricTable.format_good_bad_table(
self.groupby_param_name.replace("_", " "),
col_labels,
row_labels,
rows,
print_full_doc)
return table_text
class HessianVsMarginPlot(object):
"""Plots experimental results with Hessian spectral norm and margin."""
def __init__(self, db_filename, dataset, network, groupby_param_name,
figure_path):
    """Constructor.
    Arranges results into a plot table, where columns are
    progressions of various metrics over epochs, and rows
    are different settings of a "groupby" parameter (e.g. a learning rate).
    Args:
      db_filename: path to sqlite3 database.
      dataset: dataset name.
      network: network name.
      groupby_param_name: parameter to group results by.
      figure_path: path for a PDF file with the resulting figure.
    """
    rs = Results(db_filename, mean_metrics=True)
self.records_good_min = rs.get_hessian(
dataset,
groupby_param_name,
bad_min=False,
extra_constraints="and network like '%s' " % network)
self.records_bad_min = rs.get_hessian(
dataset,
groupby_param_name,
bad_min=True,
extra_constraints="and network like '%s' " % network)
self.groupby_param_name = groupby_param_name
self.n_params = self.records_good_min["sq_margin"].shape[0]
self.figure_path = figure_path
def plot_one_setting(self, param_index, legend=True):
"""Plot results for one "groupby" parameter.
Args:
param_index: index of a "groupby" parameter.
legend: [bool] plot legend.
"""
pass_numbers = self.records_good_min["pass_numbers"]
hessian_sv_good = self.records_good_min["hessian_top_sv_means"][
param_index, :]
sq_margin_good = self.records_good_min["sq_margin"][param_index, :]
val_error_good = self.records_good_min["val_error"][param_index, :]
train_error_good = self.records_good_min["train_error"][param_index, :]
val_zero_one_error_good = self.records_good_min["val_zero_one_error"][
param_index, :]
train_zero_one_error_good = self.records_good_min["train_zero_one_error"][
param_index, :]
train_grad_norm_good = self.records_good_min["train_grad_norm"][
param_index, :]
hessian_sv_bad = self.records_bad_min["hessian_top_sv_means"][
param_index, :]
sq_margin_bad = self.records_bad_min["sq_margin"][param_index, :]
val_error_bad = self.records_bad_min["val_error"][param_index, :]
train_error_bad = self.records_bad_min["train_error"][param_index, :]
val_zero_one_error_bad = self.records_bad_min["val_zero_one_error"][
param_index, :]
train_zero_one_error_bad = self.records_bad_min["train_zero_one_error"][
param_index, :]
train_grad_norm_bad = self.records_bad_min["train_grad_norm"][
param_index, :]
self.n_cell_rows = 5
title = "%s = %s" % (self.groupby_param_name,
self.records_good_min[
self.groupby_param_name][param_index][0])
self.plot_cell(
param_index + 1,
pass_numbers,
hessian_sv_bad,
hessian_sv_good,
"||Hessian (bad)||_2",
"||Hessian (good)||_2",
title,
plotter=plt.loglog,
add_legend=legend)
self.plot_cell(
param_index + 1,
pass_numbers,
train_grad_norm_bad,
train_grad_norm_good,
"||grad (bad)||",
"||grad (good)||",
title,
plotter=plt.loglog,
add_legend=legend)
self.plot_cell(
self.n_params + param_index + 1,
pass_numbers,
1.0 / sq_margin_bad,
1.0 / sq_margin_good,
"Inv. of margin^2 (bad)",
"Inv. of margin^2 (good)",
title,
plotter=plt.loglog,
add_legend=legend)
self.plot_cell(
2 * self.n_params + param_index + 1,
pass_numbers,
train_error_bad,
train_error_good,
"Train error (bad)",
"Train error (good)",
title,
plotter=plt.semilogx,
add_legend=legend)
self.plot_cell(
2 * self.n_params + param_index + 1,
pass_numbers,
val_error_bad,
val_error_good,
"Val error (bad)",
"Val error (good)",
title,
plotter=plt.semilogx,
add_legend=legend)
self.plot_cell(
3 * self.n_params + param_index + 1,
pass_numbers,
train_zero_one_error_bad,
train_zero_one_error_good,
"Train (0/1) error (bad)",
"Train (0/1) error (good)",
title,
plotter=plt.semilogx,
add_legend=legend)
self.plot_cell(
3 * self.n_params + param_index + 1,
pass_numbers,
val_zero_one_error_bad,
val_zero_one_error_good,
"Val 0/1 error (bad)",
"Val 0/1 error (good)",
title,
plotter=plt.semilogx,
add_legend=legend)
def plot_cell(self,
i,
x,
y_bad,
y_good,
label_bad,
label_good,
title,
plotter=plt.plot,
add_legend=True):
"""Plot one cell of a plot table.
Args:
i: subplot index of a cell.
x: values on the x axis.
y_bad: values on the y axis, for a "bad" experiment.
y_good: values on the y axis, for a "good" experiment.
      label_bad: corresponding label.
label_good: corresponding label.
title: title of a plot.
plotter: matplotlib plotting function.
add_legend: [bool] plot a legend.
"""
if any(np.isnan(y_bad)) or any(np.isnan(y_good)):
tf.logging.info("i=%d, Cannot plot: contains NaNs." % i)
return
ax = plt.subplot(self.n_cell_rows, self.n_params, i)
ax.set_title(title)
plot_rs = plotter(x, y_bad, linewidth=3, label=label_bad)
plotter(
x,
y_good,
linewidth=3,
label=label_good,
color=plot_rs[0].get_color(),
linestyle="--")
if add_legend:
legend = plt.legend(loc="best", fontsize="small")
      legend.get_frame().set_alpha(0.5)
plt.grid(True)
def plot(self):
plt.figure(figsize=(self.n_params * 10, 10))
for i in range(self.n_params):
self.plot_one_setting(i, legend=(i == 0))
tf.logging.info("Saving to %s", self.figure_path)
plt.savefig(self.figure_path, tight_layout=True, bbox_inches="tight")
def pdflatex(tex, pdf_path):
_, fname = tempfile.mkstemp()
open(fname, "wt").write(tex)
shell = ("pdflatex --jobname='%s' --output-directory='%s' %s" %
(os.path.basename(pdf_path).split(".")[0], os.path.dirname(pdf_path),
fname))
pdflatex_out = os.popen(shell).read()
tf.logging.info(pdflatex_out)
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
if FLAGS.do == "plot_hessian_vs_margin":
plot = HessianVsMarginPlot(FLAGS.db, FLAGS.dataset, FLAGS.network,
FLAGS.groupby_param, FLAGS.pdf)
plot.plot()
if FLAGS.show:
plt.show()
elif FLAGS.do.startswith("table_"):
if FLAGS.do == "table_norms":
metrics = [("weight_l2_norms", "Weight L2 norm", None),
("path_l2_norms", "Path L2 norm", None),
("spectral_products", "Prod. of layer\\newline spectral norms",
None),
("spectral_complexities", "Spectral\\newline complexity",
None),
("train_grad_norm", "Train grad. norm", None),
("val_grad_norm", "Val grad. norm", None)]
elif FLAGS.do == "table_phased":
metrics = [("weight_variance", "Weight variance", None),
("hessian_top_sv_means", "Hessian\\newline spectral norm",
None),
("train_grad_norm", "Train grad. norm", None),
("val_grad_norm", "Val grad. norm", None),
("sharpness_0.0005",
"Sharpness\\newline (alpha=0.0005)", None),
("weight_entropy", "Weight entropy\\newline($10^3$ bin hist)",
None), ("sq_margin", "Squared\\newline soft margin", None)]
table = MetricTable(FLAGS.db, FLAGS.dataset, "bad_min", FLAGS.network,
"and learning_rate=0.05")
table_text = table.print(metrics, print_full_doc=FLAGS.pdf)
if FLAGS.pdf:
pdflatex(table_text, FLAGS.pdf)
else:
print(table_text)
if __name__ == "__main__":
tf.app.run(main)
| apache-2.0 |
puruckertom/ubertool | ubertool/fellerarley/fellerarley_exe.py | 2 | 3681 | import numpy as np
import os.path
import pandas as pd
import sys
#find parent directory and import base (travis)
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
#print(sys.path)
#print(os.path)
class FellerarleyInputs(ModelSharedInputs):
"""
Input class for Fellerarley.
#(N_o,K,rho,q,E,T)
"""
def __init__(self):
"""Class representing the inputs for Fellerarley"""
super(FellerarleyInputs, self).__init__()
self.init_pop_size = pd.Series([], dtype="float")
self.growth_rate = pd.Series([], dtype="float")
self.time_steps = pd.Series([], dtype="float")
self.death_rate = pd.Series([], dtype="float")
self.iteration = pd.Series([], dtype="float")
class FellerarleyOutputs(object):
"""
Output class for Fellerarley.
"""
def __init__(self):
"""Class representing the outputs for Fellerarley"""
super(FellerarleyOutputs, self).__init__()
#dictionary of time, outputs
self.out_pop_time_series = []
class Fellerarley(UberModel, FellerarleyInputs, FellerarleyOutputs):
"""
Fellerarley model for population growth.
"""
def __init__(self, pd_obj, pd_obj_exp):
"""Class representing the Fellerarley model and containing all its methods"""
super(Fellerarley, self).__init__()
self.pd_obj = pd_obj
self.pd_obj_exp = pd_obj_exp
self.pd_obj_out = None
def execute_model(self):
"""
Callable to execute the running of the model:
1) Populate input parameters
2) Create output DataFrame to hold the model outputs
3) Run the model's methods to generate outputs
4) Fill the output DataFrame with the generated model outputs
"""
self.populate_inputs(self.pd_obj, self)
self.pd_obj_out = self.populate_outputs(self)
self.run_methods()
self.fill_output_dataframe(self)
# Begin model methods
def run_methods(self):
""" Execute all algorithm methods for model logic """
try:
# dictionaries of population time series
self.batch_fellerarley()
except Exception as e:
print(str(e))
def fellerarley_growth(self, idx):
#T=self.time_steps
#index_set = range(T+1)
index_set = range(self.time_steps[idx] + 1)
Ite=self.iteration
x = np.zeros((Ite,len(index_set)))
x_mu = np.zeros(len(index_set))
x_mu[0]=self.init_pop_size[idx]
rho=self.growth_rate[idx]/100
beta=self.death_rate[idx]/100
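        # Stochastic birth-death process: at each step every individual gives
        # birth with probability rho and dies with probability beta, while
        # x_mu tracks the deterministic expectation (1 + rho - beta) * x_mu[n].
        # Above a population of 10000 the simulation switches to that
        # deterministic update for speed.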
for i in range(0,Ite):
x[i][0]=self.init_pop_size[idx]
n=0
            while n < len(index_set) - 1:
x_mu[n+1]=(1+rho-beta)*x_mu[n]
if x[i][n]<10000:
m=np.random.random(x[i][n])
m1=np.random.random(x[i][n])
n_birth=np.sum(m<rho)
n_death=np.sum(m1<beta)
x[i][n+1]=x[i][n]+n_birth-n_death
if x[i][n+1]<0:
x[i][n+1]=0
else:
x[i][n+1]=(1+rho-beta)*x[i][n]
n=n+1
t = range(0, self.time_steps[idx])
d = dict(zip(t, x))
self.out_pop_time_series[idx].append(d)
return
# x=x.tolist()
# x_mu=x_mu.tolist()
# return x, x_mu
def batch_fellerarley(self):
        for idx, _ in enumerate(self.init_pop_size):
self.fellerarley_growth(idx)
return | unlicense |
AndKe/MAVProxy | MAVProxy/modules/lib/live_graph.py | 6 | 3201 | #!/usr/bin/env python
"""
MAVProxy realtime graphing module, partly based on the wx graphing
demo by Eli Bendersky ([email protected])
http://eli.thegreenplace.net/files/prog_code/wx_mpl_dynamic_graph.py.txt
"""
import platform
from MAVProxy.modules.lib import mp_util
from MAVProxy.modules.lib import multiproc
class LiveGraph():
'''
a live graph object using wx and matplotlib
All of the GUI work is done in a child process to provide some insulation
    from the parent mavproxy instance and prevent instability in the GCS.
    New data is sent to the LiveGraph instance via a pipe.
'''
def __init__(self,
fields,
title='MAVProxy: LiveGraph',
timespan=20.0,
tickresolution=0.2,
colors=[ 'red', 'green', 'blue', 'orange', 'olive', 'cyan', 'magenta', 'brown',
'violet', 'purple', 'grey', 'black']):
self.fields = fields
self.colors = colors
self.title = title
self.timespan = timespan
self.tickresolution = tickresolution
self.values = [None]*len(self.fields)
self.parent_pipe,self.child_pipe = multiproc.Pipe()
self.close_graph = multiproc.Event()
self.close_graph.clear()
self.child = multiproc.Process(target=self.child_task)
self.child.start()
def child_task(self):
'''child process - this holds all the GUI elements'''
mp_util.child_close_fds()
import matplotlib, platform
if platform.system() != "Darwin":
# on MacOS we can't set WxAgg here as it conflicts with the MacOS version
matplotlib.use('WXAgg')
from MAVProxy.modules.lib import wx_processguard
from MAVProxy.modules.lib.wx_loader import wx
app = wx.App(False)
from MAVProxy.modules.lib import live_graph_ui
app.frame = live_graph_ui.GraphFrame(state=self)
app.frame.Show()
app.MainLoop()
def add_values(self, values):
'''add some data to the graph'''
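        # values should be a sequence of floats, one per field passed to
        # __init__ (see the example in __main__ below)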
if self.child.is_alive():
self.parent_pipe.send(values)
def close(self):
'''close the graph'''
self.close_graph.set()
if self.is_alive():
self.child.join(2)
def is_alive(self):
'''check if graph is still going'''
return self.child.is_alive()
if __name__ == "__main__":
multiproc.freeze_support()
# test the graph
import time, math
import live_graph
livegraph = live_graph.LiveGraph(['sin(t)', 'cos(t)', 'sin(t+1)',
'cos(t+1)', 'sin(t+2)', 'cos(t+2)',
'cos(t+1)', 'sin(t+2)', 'cos(t+2)', 'x'],
timespan=30,
title='Graph Test')
while livegraph.is_alive():
t = time.time()
livegraph.add_values([math.sin(t), math.cos(t),
math.sin(t+1), math.cos(t+1),
math.sin(t+1), math.cos(t+1),
math.sin(t+1), math.cos(t+1),
math.sin(t+2), math.cos(t+2)])
time.sleep(0.05)
| gpl-3.0 |
ycaihua/scikit-learn | sklearn/tree/export.py | 30 | 4529 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Licence: BSD 3 clause
from ..externals import six
from . import _tree
def export_graphviz(decision_tree, out_file="tree.dot", feature_names=None,
max_depth=None):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
feature_names : list of strings, optional (default=None)
Names of each of the features.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def node_to_str(tree, node_id, criterion):
if not isinstance(criterion, six.string_types):
criterion = "impurity"
value = tree.value[node_id]
if tree.n_outputs == 1:
value = value[0, :]
if tree.children_left[node_id] == _tree.TREE_LEAF:
return "%s = %.4f\\nsamples = %s\\nvalue = %s" \
% (criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id],
value)
else:
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X[%s]" % tree.feature[node_id]
return "%s <= %.4f\\n%s = %s\\nsamples = %s" \
% (feature,
tree.threshold[node_id],
criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id])
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
out_file.write('%d [label="%s", shape="box"] ;\n' %
(node_id, node_to_str(tree, node_id, criterion)))
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
out_file.write('%d [label="(...)", shape="box"] ;\n' % node_id)
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
out_file.write("digraph Tree {\n")
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
Adai0808/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
KEHANG/AutoFragmentModeling | ipython/3. reporting/reactant_conversion.py | 1 | 4304 | #~/usr/bin/env python
#-*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
# set global settings
def init_plotting():
plt.rcParams['figure.figsize'] = (4, 3)
plt.rcParams['font.size'] = 8
plt.rcParams['font.family'] = 'Helvetica'
plt.rcParams['axes.labelsize'] = plt.rcParams['font.size']
plt.rcParams['axes.titlesize'] = 1.5*plt.rcParams['font.size']
plt.rcParams['legend.fontsize'] = plt.rcParams['font.size']
plt.rcParams['xtick.labelsize'] = plt.rcParams['font.size']
plt.rcParams['ytick.labelsize'] = plt.rcParams['font.size']
plt.rcParams['savefig.dpi'] = 2*plt.rcParams['savefig.dpi']
plt.rcParams['xtick.major.size'] = 3
plt.rcParams['xtick.minor.size'] = 3
plt.rcParams['xtick.major.width'] = 1
plt.rcParams['xtick.minor.width'] = 1
plt.rcParams['ytick.major.size'] = 3
plt.rcParams['ytick.minor.size'] = 3
plt.rcParams['ytick.major.width'] = 1
plt.rcParams['ytick.minor.width'] = 1
plt.rcParams['legend.frameon'] = True
plt.rcParams['legend.loc'] = 'best'
plt.rcParams['axes.linewidth'] = 1
plt.gca().spines['right'].set_color('none')
plt.gca().spines['top'].set_color('none')
plt.gca().xaxis.set_ticks_position('bottom')
plt.gca().yaxis.set_ticks_position('left')
def load_comparison_data(detailed_model, frag_model1, frag_model2=None):
v0_csv = os.path.join('../', 'data', 'pdd_chemistry',
'detailed', detailed_model,
'results', 'reactant_conv.csv')
v0_data = []
with open(v0_csv, 'r') as read_in:
for line in read_in:
tokens = line.split(' ')
entries = [float(token) for token in tokens]
v0_data.append(entries)
assert len(v0_data) == 2
v1_csv = os.path.join('../', 'data', 'pdd_chemistry',
frag_model1,
'results', 'reactant_conv.csv')
v1_data = []
with open(v1_csv, 'r') as read_in:
for line in read_in:
tokens = line.split(' ')
entries = [float(token) for token in tokens]
v1_data.append(entries)
assert len(v1_data) == 2
if frag_model2:
v2_csv = os.path.join('../', 'data', 'pdd_chemistry',
frag_model2,
'results', 'reactant_conv.csv')
v2_data = []
with open(v2_csv, 'r') as read_in:
for line in read_in:
tokens = line.split(' ')
entries = [float(token) for token in tokens]
v2_data.append(entries)
assert len(v2_data) == 2
return np.array(v0_data), np.array(v1_data), np.array(v2_data)
else:
return np.array(v0_data), np.array(v1_data), None
def plot_comparison(v0_data, v1_data, v2_data=None,
detailed_model=None,
frag_model1=None,
frag_model2=None,
xlabel='',
ylabel='',
figure_name='',
xlim=10, ylim=1.0):
init_plotting()
plt.figure()
plt.plot(v0_data[0]/3600.0, v0_data[1], label='Detailed: {0}'.format(detailed_model))
plt.plot(v1_data[0]/3600,v1_data[1], label='Fragment: {0}'.format(frag_model1))
    if v2_data is not None:
plt.plot(v2_data[0]/3600,v2_data[1], label='Fragment: {0}'.format(frag_model2))
plt.gca().set_xscale('log')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xlim((.1, xlim))
plt.ylim((0, ylim))
plt.gca().legend(scatterpoints=1)
plt.tight_layout()
plt.savefig(figure_name)
detailed_model = 'pdd_2014_pruning4_s4_a3ene_c11'
frag_model1 = 'two-sided'
frag_model2 = None
if frag_model2:
figure_name = 'reactant_conversion_{0}_vs_{1}'.format(frag_model1, frag_model2)
else:
figure_name = 'reactant_conversion_{0}'.format(frag_model1)
# plot reactant conversion
xlabel = 'Time / hr'
ylabel = 'Conversion'
detailed, frag1, frag2 = load_comparison_data(detailed_model, frag_model1, frag_model2)
plot_comparison(detailed, frag1, frag2,
detailed_model,
frag_model1,
frag_model2,
xlabel, ylabel,
'{0}.pdf'.format(figure_name),
xlim=14)
| mit |
RachitKansal/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
ankurankan/scikit-learn | sklearn/linear_model/omp.py | 6 | 29556 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import _check_cv as check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
L = np.empty((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False, return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path
)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
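# Editor's note: illustrative usage sketch, not part of scikit-learn.  It builds a
# small noiseless sparse-recovery problem with unit-norm columns (as the docstring
# above requires) and checks that ``orthogonal_mp`` recovers the planted
# coefficients; the ``_demo`` names are invented for this example.
def _sketch_orthogonal_mp_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X_demo = rng.randn(50, 20)
    X_demo /= np.sqrt(np.sum(X_demo ** 2, axis=0))   # unit-norm columns
    true_coef = np.zeros(20)
    true_coef[[2, 7, 11]] = [1.5, -2.0, 0.8]
    y_demo = np.dot(X_demo, true_coef)
    coef_hat = orthogonal_mp(X_demo, y_demo, n_nonzero_coefs=3)
    # in this easy noiseless setting the support and values are recovered
    assert np.allclose(coef_hat, true_coef, atol=1e-8)
    return coef_hat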
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
        Squared L2 norms of the columns of y (one value per target). Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
# or subsequent target will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path
)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
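# Editor's note: illustrative sketch, not part of scikit-learn.  With the Gram
# matrix G = X'X and the covariance Xy = X'y precomputed, ``orthogonal_mp_gram``
# is expected to match ``orthogonal_mp`` run on the raw data; the ``_demo`` names
# are invented for this example.
def _sketch_gram_equivalence():
    import numpy as np
    rng = np.random.RandomState(1)
    X_demo = rng.randn(40, 15)
    X_demo /= np.sqrt(np.sum(X_demo ** 2, axis=0))   # unit-norm columns
    y_demo = rng.randn(40)
    G_demo = np.dot(X_demo.T, X_demo)
    Xy_demo = np.dot(X_demo.T, y_demo)
    coef_x = orthogonal_mp(X_demo, y_demo, n_nonzero_coefs=4, precompute=False)
    coef_g = orthogonal_mp_gram(G_demo, Xy_demo, n_nonzero_coefs=4)
    assert np.allclose(coef_x, coef_g)
    return coef_g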
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Mathching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X = check_array(X)
y = np.asarray(y)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
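# Editor's note: illustrative sketch, not part of scikit-learn, showing the
# estimator API wrapped around ``orthogonal_mp``: fit on lightly noisy data,
# inspect the selected support through ``coef_``, then predict.  The ``_demo``
# names are invented for this example.
def _sketch_omp_estimator_usage():
    import numpy as np
    rng = np.random.RandomState(2)
    X_demo = rng.randn(60, 12)
    w_true = np.zeros(12)
    w_true[[1, 5]] = [3.0, -1.0]
    y_demo = np.dot(X_demo, w_true) + 0.01 * rng.randn(60)
    model = OrthogonalMatchingPursuit(n_nonzero_coefs=2)
    model.fit(X_demo, y_demo)
    support = np.flatnonzero(model.coef_)   # expected to be [1, 5] here
    predictions = model.predict(X_demo[:5])
    return support, predictions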
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum numbers of iterations to perform, therefore maximum features
to include. 100 by default.
Returns
-------
residues: array, shape (n_samples, max_features)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Mathching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
Maximum numbers of iterations to perform, therefore maximum features
to include. 10% of ``n_features`` but at least 5 if available.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
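# Editor's note: illustrative sketch, not part of scikit-learn.  The cross-validated
# estimator picks ``n_nonzero_coefs_`` from the per-fold residues computed by
# ``_omp_path_residues`` above and then refits a plain OrthogonalMatchingPursuit on
# the full data; the ``_demo`` names are invented for this example.
def _sketch_omp_cv_usage():
    import numpy as np
    rng = np.random.RandomState(3)
    X_demo = rng.randn(100, 20)
    w_true = np.zeros(20)
    w_true[[0, 4, 9]] = [2.0, -1.5, 1.0]
    y_demo = np.dot(X_demo, w_true) + 0.05 * rng.randn(100)
    model = OrthogonalMatchingPursuitCV(cv=3)
    model.fit(X_demo, y_demo)
    # n_nonzero_coefs_ should land at (or near) 3 for this easy problem
    return model.n_nonzero_coefs_, np.flatnonzero(model.coef_)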
| bsd-3-clause |
jsharkey13/dmarc-monitoring | dmarc_analysis.py | 1 | 10932 | import datetime
import os
import argparse
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
import matplotlib.patches as mpatches
import numpy as np
from dmarc_storage import DMARCStorage
def plot_percentage_passing(dates, fail, none, other, passing, category, folder=None):
fig = plt.figure(facecolor='white', figsize=(12, 8))
plt.gca().set_title('%s Status of Messages' % category)
plt.gca().set_ylabel('Percentage of Messages Received')
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
plt.gca().xaxis.set_minor_locator(mdates.DayLocator())
plt.gca().set_ylim([0, 100])
red_patch = mpatches.Patch(color='#ef3e36', label='FAIL')
grey_patch = mpatches.Patch(color='#cccccc', label='NONE')
dark_patch = mpatches.Patch(color='#666666', label='OTHER')
green_patch = mpatches.Patch(color='#509e2e', label='PASS')
auth = [fail]
handles = [red_patch]
colours = ['#ef3e36']
if none is not None:
auth.append(none)
handles.append(grey_patch)
colours.append('#cccccc')
if other is not None:
auth.append(other)
handles.append(dark_patch)
colours.append('#666666')
auth.append(passing)
handles.append(green_patch)
colours.append('#509e2e')
handles.reverse()
auth_percents = (auth / np.sum(auth, axis=0).astype(float)) * 100
plt.stackplot(dates, auth_percents, colors=colours, edgecolor='none')
plt.legend(handles=handles, loc=2)
fig.autofmt_xdate()
if folder is not None:
fname = os.path.join(folder, 'percentage_passing_%s.png' % category)
fig.savefig(fname, bbox_inches='tight', dpi=600)
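# Editor's note: illustrative sketch, not part of this project, of the normalisation
# used in plot_percentage_passing above: per-day counts for each status are stacked
# and rescaled so that every day sums to 100% before being passed to ``stackplot``.
def _sketch_percentage_stack():
    import numpy as np
    fails = np.array([5, 2, 0])       # one count per day
    passes = np.array([15, 18, 30])
    counts = np.vstack([fails, passes]).astype(float)
    percents = (counts / counts.sum(axis=0)) * 100
    # first day: 25% fail, 75% pass
    assert np.allclose(percents[:, 0], [25.0, 75.0])
    return percents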
def plot_number_passing(dates, fail, none, other, passing, category, folder=None):
fig = plt.figure(facecolor='white', figsize=(12, 8))
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
plt.gca().xaxis.set_minor_locator(mdates.DayLocator())
plt.gca().set_title('%s Status of Messages' % category)
plt.gca().set_ylabel('Number of Messages Received')
red_patch = mpatches.Patch(color='#ef3e36', label='FAIL')
grey_patch = mpatches.Patch(color='#cccccc', label='NONE')
dark_patch = mpatches.Patch(color='#666666', label='OTHER')
green_patch = mpatches.Patch(color='#509e2e', label='PASS')
fail = np.array(fail) # Else the + operator appends rather than summing!
#
handles = [red_patch]
plt.bar(dates, fail, color='#ef3e36', edgecolor='none')
bottom = fail
if none is not None:
plt.bar(dates, none, bottom=bottom, color='#cccccc', edgecolor='none')
handles.append(grey_patch)
bottom += none
if other is not None:
plt.bar(dates, other, bottom=bottom, color='#666666', edgecolor='none')
handles.append(dark_patch)
bottom += other
plt.bar(dates, passing, bottom=bottom, color='#509e2e', edgecolor='none')
bottom += passing
handles.append(green_patch)
handles.reverse()
#
plt.ylim((0, np.around(bottom.max() + 50, -2))) # Round max Y value to nearest 100
plt.legend(handles=handles, loc=2)
fig.autofmt_xdate()
if folder is not None:
fname = os.path.join(folder, 'number_passing_%s.png' % category)
fig.savefig(fname, bbox_inches='tight', dpi=600)
def generate_report(n_reports, min_time, max_time, by_disposition, by_host, by_receiver,
dkim_domains, by_status, folder=None):
report = "Isaac Emails From %s to %s\n" % (min_time, max_time)
report += "\t %d emails in %d reports\n" % (sum(by_disposition.values()), n_reports)
report += "\n\n"
TOPN = 25
LJUST = 64
RJUST = 7
LINELENGTH = 74
report += "Of all email sent:\n"
report += " - %6d emails have been rejected\n" % by_disposition.get("reject", 0)
report += " - %6d emails have been quarantined\n" % by_disposition.get("quarantine", 0)
report += " - %6d emails had no policy applied\n" % by_disposition.get("none", 0)
if by_disposition.get("reject", 0) + by_disposition.get("quarantine", 0) == 0:
report += "Publishing a 'reject' policy would have discarded %d emails.\n" % by_status.get("SPF:fail, DKIM:fail", 0)
report += "\n\n"
report += "Sender Hostname".ljust(LJUST) + "|" + "Sent".rjust(RJUST) + "\n"
report += "=" * LINELENGTH + "\n"
for host in sorted(by_host.keys(), key=lambda x: by_host[x], reverse=True)[:TOPN]:
report += host.ljust(LJUST) + "|" + str(by_host[host]).rjust(RJUST) + "\n"
if len(by_host) > TOPN:
report += "...".ljust(LJUST) + "|" + "...".rjust(RJUST) + "\n"
others = sum(sorted(by_host.values(), reverse=True)[TOPN:])
report += "[Others]".ljust(LJUST) + "|" + str(others).rjust(RJUST)
report += "\n\n\n"
report += "Receiver Name".ljust(LJUST) + "|" + "Count".rjust(RJUST) + "\n"
report += "=" * LINELENGTH + "\n"
for rec in sorted(by_receiver.keys(), key=lambda x: by_receiver[x], reverse=True)[:TOPN]:
report += rec.ljust(LJUST) + "|" + str(by_receiver[rec]).rjust(RJUST) + "\n"
if len(by_receiver) > TOPN:
report += "...".ljust(LJUST) + "|" + "...".rjust(RJUST) + "\n"
others = sum(sorted(by_receiver.values(), reverse=True)[TOPN:])
report += "[Others]".ljust(LJUST) + "|" + str(others).rjust(RJUST) + "\n"
report += "\n\n\n"
report += "DKIM Signing Domain".ljust(LJUST) + "|" + "Count".rjust(RJUST) + "\n"
report += "=" * LINELENGTH + "\n"
for domain in sorted(dkim_domains.keys(), key=lambda x: dkim_domains[x], reverse=True)[:TOPN]:
report += domain.ljust(LJUST) + "|" + str(dkim_domains[domain]).rjust(RJUST) + "\n"
if len(dkim_domains) > TOPN:
report += "...".ljust(LJUST) + "|" + "...".rjust(RJUST) + "\n"
others = sum(sorted(dkim_domains.values(), reverse=True)[TOPN:])
report += "[Others]".ljust(LJUST) + "|" + str(others).rjust(RJUST) + "\n"
report += "\n\n\n"
report += "DMARC Status".ljust(LJUST) + "|" + "Count".rjust(RJUST) + "\n"
report += "=" * LINELENGTH + "\n"
for rec in sorted(by_status.keys(), key=lambda x: by_status[x], reverse=True):
report += rec.ljust(LJUST) + "|" + str(by_status[rec]).rjust(RJUST) + "\n"
report += "\n\n\n"
report += "Policy Applied".ljust(LJUST) + "|" + "Count".rjust(RJUST) + "\n"
report += "=" * LINELENGTH + "\n"
for rec in sorted(by_disposition.keys(), key=lambda x: by_disposition[x], reverse=True):
report += rec.ljust(LJUST) + "|" + str(by_disposition[rec]).rjust(RJUST) + "\n"
if folder is not None:
fname = os.path.join(folder, 'report.txt')
with open(fname, "w") as report_file:
report_file.write(report)
return report
def _parse_and_truncate_timestamp(timestamp):
    # Convert from an integer timestamp to a datetime object:
dt = datetime.datetime.utcfromtimestamp(timestamp)
# Turn this into just a date, stripping out the time part!
return datetime.date(dt.year, dt.month, dt.day)
if __name__ == "__main__":
# Allow specification of parameters at runtime:
    options = argparse.ArgumentParser(description="Analyse DMARC reports stored in a database.")
options.add_argument("-w", "--writefiles", help="write reports and graphs to files as well as stdout", action="store_true")
options.add_argument("-o", "--outputfolder", help="output report and graphs to specified folder", default="./results")
args = options.parse_args()
# Choose the output folder:
dest_folder = args.outputfolder if args.writefiles else None
if dest_folder is None:
print "Not saving report or graphs to disk!"
if (dest_folder is not None) and (not os.path.exists(dest_folder)):
os.makedirs(dest_folder)
    # Connect to the database:
sqlite_db = DMARCStorage()
# Generate a text report summary:
n_reports = sqlite_db.get_number_reports()
min_t = sqlite_db.get_reporting_start_date()
max_t = sqlite_db.get_reporting_end_date()
by_disposition = sqlite_db.get_count_by_disposition()
by_host = sqlite_db.get_count_by_hostnames()
by_receiver = sqlite_db.get_count_by_receiver()
dkim_domains = sqlite_db.get_count_by_dkim_domain()
by_status = sqlite_db.get_count_by_status_string()
print generate_report(n_reports, min_t, max_t, by_disposition, by_host, by_receiver,
dkim_domains, by_status, dest_folder)
# Produce graphs showing SPF status of messages:
res = sqlite_db.get_raw_spf_status_count_by_timestamp()
spf_passes = dict()
spf_fails = dict()
for r in res:
date = _parse_and_truncate_timestamp(r[0])
if date not in spf_passes:
spf_passes[date] = 0
if date not in spf_fails:
spf_fails[date] = 0
if r[1] == 1:
spf_passes[date] += r[2]
else:
spf_fails[date] += r[2]
dates = sorted(spf_passes.keys())
spf_passes = [spf_passes[d] for d in dates]
spf_fails = [spf_fails[d] for d in dates]
plot_number_passing(dates, spf_fails, None, None, spf_passes, "SPF", dest_folder)
plot_percentage_passing(dates, spf_fails, None, None, spf_passes, "SPF", dest_folder)
# Produce graphs showing DKIM status of messages:
res = sqlite_db.get_raw_dkim_status_count_by_timestamp()
dkim_passes = dict()
dkim_fails = dict()
for r in res:
date = _parse_and_truncate_timestamp(r[0])
if date not in dkim_passes:
dkim_passes[date] = 0
if date not in dkim_fails:
dkim_fails[date] = 0
if r[1] == 1:
dkim_passes[date] += r[2]
else:
dkim_fails[date] += r[2]
dates = sorted(dkim_passes.keys())
dkim_passes = [dkim_passes[d] for d in dates]
dkim_fails = [dkim_fails[d] for d in dates]
plot_number_passing(dates, dkim_fails, None, None, dkim_passes, "DKIM", dest_folder)
plot_percentage_passing(dates, dkim_fails, None, None, dkim_passes, "DKIM", dest_folder)
# Produce graphs showing DMARC status of messages:
res = sqlite_db.get_raw_dmarc_status_count_by_timestamp()
dmarc_passes = dict()
dmarc_fails = dict()
for r in res:
date = _parse_and_truncate_timestamp(r[0])
if date not in dmarc_passes:
dmarc_passes[date] = 0
if date not in dmarc_fails:
dmarc_fails[date] = 0
if r[1] > 0: # If one or both of SPF and DKIM passed, DMARC passes
dmarc_passes[date] += r[2]
else:
dmarc_fails[date] += r[2]
dates = sorted(dmarc_passes.keys())
dmarc_passes = [dmarc_passes[d] for d in dates]
dmarc_fails = [dmarc_fails[d] for d in dates]
plot_number_passing(dates, dmarc_fails, None, None, dmarc_passes, "DMARC", dest_folder)
plot_percentage_passing(dates, dmarc_fails, None, None, dmarc_passes, "DMARC", dest_folder)
#
# plt.show()
| mit |
idlead/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
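# Editor's note: illustrative sketch, not part of scikit-learn.  For continuous
# columns, _grid_from_X builds one linspace axis per feature between the requested
# percentiles and returns the cartesian product of those axes as the evaluation
# grid; the ``_demo`` names are invented for this example.
def _sketch_grid_from_X():
    import numpy as np
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(size=(200, 2))
    grid, axes = _grid_from_X(X_demo, percentiles=(0.05, 0.95), grid_resolution=5)
    # two axes of 5 points each -> a grid of 5 * 5 = 25 points in 2-D
    assert grid.shape == (25, 2) and len(axes) == 2
    return grid, axes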
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# dont return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
pprett/scikit-learn | examples/linear_model/plot_ols_ridge_variance.py | 387 | 2060 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's slope
can vary quite a bit for each prediction due to the noise
induced in the observations.
Ridge regression is essentially minimizing a penalised version
of the least-squares objective. The penalty `shrinks` the
values of the regression coefficients.
Despite the few data points in each dimension, the slope
of the prediction is much more stable and the variance
in the line itself is greatly reduced, compared to that
of standard linear regression.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T
np.random.seed(0)
classifiers = dict(ols=linear_model.LinearRegression(),
ridge=linear_model.Ridge(alpha=.1))
fignum = 1
for name, clf in classifiers.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.title(name)
ax = plt.axes([.12, .12, .8, .8])
for _ in range(6):
this_X = .1 * np.random.normal(size=(2, 1)) + X_train
clf.fit(this_X, y_train)
ax.plot(X_test, clf.predict(X_test), color='.5')
ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)
clf.fit(X_train, y_train)
ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)
ax.set_xticks(())
ax.set_yticks(())
ax.set_ylim((0, 1.6))
ax.set_xlabel('X')
ax.set_ylabel('y')
ax.set_xlim(0, 2)
fignum += 1
plt.show()
| bsd-3-clause |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/tseries/test_holiday.py | 6 | 16104 | import pytest
from datetime import datetime
import pandas.util.testing as tm
from pandas import compat
from pandas import DatetimeIndex
from pandas.tseries.holiday import (USFederalHolidayCalendar, USMemorialDay,
USThanksgivingDay, nearest_workday,
next_monday_or_tuesday, next_monday,
previous_friday, sunday_to_monday, Holiday,
DateOffset, MO, SA, Timestamp,
AbstractHolidayCalendar, get_calendar,
HolidayCalendarFactory, next_workday,
previous_workday, before_nearest_workday,
EasterMonday, GoodFriday,
after_nearest_workday, weekend_to_monday,
USLaborDay, USColumbusDay,
USMartinLutherKingJr, USPresidentsDay)
from pytz import utc
class TestCalendar(object):
def setup_method(self, method):
self.holiday_list = [
datetime(2012, 1, 2),
datetime(2012, 1, 16),
datetime(2012, 2, 20),
datetime(2012, 5, 28),
datetime(2012, 7, 4),
datetime(2012, 9, 3),
datetime(2012, 10, 8),
datetime(2012, 11, 12),
datetime(2012, 11, 22),
datetime(2012, 12, 25)]
self.start_date = datetime(2012, 1, 1)
self.end_date = datetime(2012, 12, 31)
def test_calendar(self):
calendar = USFederalHolidayCalendar()
holidays = calendar.holidays(self.start_date, self.end_date)
holidays_1 = calendar.holidays(
self.start_date.strftime('%Y-%m-%d'),
self.end_date.strftime('%Y-%m-%d'))
holidays_2 = calendar.holidays(
Timestamp(self.start_date),
Timestamp(self.end_date))
assert list(holidays.to_pydatetime()) == self.holiday_list
assert list(holidays_1.to_pydatetime()) == self.holiday_list
assert list(holidays_2.to_pydatetime()) == self.holiday_list
def test_calendar_caching(self):
# Test for issue #9552
class TestCalendar(AbstractHolidayCalendar):
def __init__(self, name=None, rules=None):
super(TestCalendar, self).__init__(name=name, rules=rules)
jan1 = TestCalendar(rules=[Holiday('jan1', year=2015, month=1, day=1)])
jan2 = TestCalendar(rules=[Holiday('jan2', year=2015, month=1, day=2)])
tm.assert_index_equal(jan1.holidays(), DatetimeIndex(['01-Jan-2015']))
tm.assert_index_equal(jan2.holidays(), DatetimeIndex(['02-Jan-2015']))
def test_calendar_observance_dates(self):
# Test for issue 11477
USFedCal = get_calendar('USFederalHolidayCalendar')
holidays0 = USFedCal.holidays(datetime(2015, 7, 3), datetime(
2015, 7, 3)) # <-- same start and end dates
holidays1 = USFedCal.holidays(datetime(2015, 7, 3), datetime(
2015, 7, 6)) # <-- different start and end dates
holidays2 = USFedCal.holidays(datetime(2015, 7, 3), datetime(
2015, 7, 3)) # <-- same start and end dates
tm.assert_index_equal(holidays0, holidays1)
tm.assert_index_equal(holidays0, holidays2)
def test_rule_from_name(self):
USFedCal = get_calendar('USFederalHolidayCalendar')
assert USFedCal.rule_from_name('Thanksgiving') == USThanksgivingDay
class TestHoliday(object):
def setup_method(self, method):
self.start_date = datetime(2011, 1, 1)
self.end_date = datetime(2020, 12, 31)
def check_results(self, holiday, start, end, expected):
assert list(holiday.dates(start, end)) == expected
# Verify that timezone info is preserved.
assert (list(holiday.dates(utc.localize(Timestamp(start)),
utc.localize(Timestamp(end)))) ==
[utc.localize(dt) for dt in expected])
def test_usmemorialday(self):
self.check_results(holiday=USMemorialDay,
start=self.start_date,
end=self.end_date,
expected=[
datetime(2011, 5, 30),
datetime(2012, 5, 28),
datetime(2013, 5, 27),
datetime(2014, 5, 26),
datetime(2015, 5, 25),
datetime(2016, 5, 30),
datetime(2017, 5, 29),
datetime(2018, 5, 28),
datetime(2019, 5, 27),
datetime(2020, 5, 25),
], )
def test_non_observed_holiday(self):
self.check_results(
Holiday('July 4th Eve', month=7, day=3),
start="2001-01-01",
end="2003-03-03",
expected=[
Timestamp('2001-07-03 00:00:00'),
Timestamp('2002-07-03 00:00:00')
]
)
self.check_results(
Holiday('July 4th Eve', month=7, day=3, days_of_week=(0, 1, 2, 3)),
start="2001-01-01",
end="2008-03-03",
expected=[
Timestamp('2001-07-03 00:00:00'),
Timestamp('2002-07-03 00:00:00'),
Timestamp('2003-07-03 00:00:00'),
Timestamp('2006-07-03 00:00:00'),
Timestamp('2007-07-03 00:00:00'),
]
)
def test_easter(self):
self.check_results(EasterMonday,
start=self.start_date,
end=self.end_date,
expected=[
Timestamp('2011-04-25 00:00:00'),
Timestamp('2012-04-09 00:00:00'),
Timestamp('2013-04-01 00:00:00'),
Timestamp('2014-04-21 00:00:00'),
Timestamp('2015-04-06 00:00:00'),
Timestamp('2016-03-28 00:00:00'),
Timestamp('2017-04-17 00:00:00'),
Timestamp('2018-04-02 00:00:00'),
Timestamp('2019-04-22 00:00:00'),
Timestamp('2020-04-13 00:00:00'),
], )
self.check_results(GoodFriday,
start=self.start_date,
end=self.end_date,
expected=[
Timestamp('2011-04-22 00:00:00'),
Timestamp('2012-04-06 00:00:00'),
Timestamp('2013-03-29 00:00:00'),
Timestamp('2014-04-18 00:00:00'),
Timestamp('2015-04-03 00:00:00'),
Timestamp('2016-03-25 00:00:00'),
Timestamp('2017-04-14 00:00:00'),
Timestamp('2018-03-30 00:00:00'),
Timestamp('2019-04-19 00:00:00'),
Timestamp('2020-04-10 00:00:00'),
], )
def test_usthanksgivingday(self):
self.check_results(USThanksgivingDay,
start=self.start_date,
end=self.end_date,
expected=[
datetime(2011, 11, 24),
datetime(2012, 11, 22),
datetime(2013, 11, 28),
datetime(2014, 11, 27),
datetime(2015, 11, 26),
datetime(2016, 11, 24),
datetime(2017, 11, 23),
datetime(2018, 11, 22),
datetime(2019, 11, 28),
datetime(2020, 11, 26),
], )
def test_holidays_within_dates(self):
# Fix holiday behavior found in #11477
# where holiday.dates returned dates outside start/end date
# or observed rules could not be applied as the holiday
# was not in the original date range (e.g., 7/4/2015 -> 7/3/2015)
start_date = datetime(2015, 7, 1)
end_date = datetime(2015, 7, 1)
calendar = get_calendar('USFederalHolidayCalendar')
new_years = calendar.rule_from_name('New Years Day')
july_4th = calendar.rule_from_name('July 4th')
veterans_day = calendar.rule_from_name('Veterans Day')
christmas = calendar.rule_from_name('Christmas')
# Holiday: (start/end date, holiday)
holidays = {USMemorialDay: ("2015-05-25", "2015-05-25"),
USLaborDay: ("2015-09-07", "2015-09-07"),
USColumbusDay: ("2015-10-12", "2015-10-12"),
USThanksgivingDay: ("2015-11-26", "2015-11-26"),
USMartinLutherKingJr: ("2015-01-19", "2015-01-19"),
USPresidentsDay: ("2015-02-16", "2015-02-16"),
GoodFriday: ("2015-04-03", "2015-04-03"),
EasterMonday: [("2015-04-06", "2015-04-06"),
("2015-04-05", [])],
new_years: [("2015-01-01", "2015-01-01"),
("2011-01-01", []),
("2010-12-31", "2010-12-31")],
july_4th: [("2015-07-03", "2015-07-03"),
("2015-07-04", [])],
veterans_day: [("2012-11-11", []),
("2012-11-12", "2012-11-12")],
christmas: [("2011-12-25", []),
("2011-12-26", "2011-12-26")]}
for rule, dates in compat.iteritems(holidays):
empty_dates = rule.dates(start_date, end_date)
assert empty_dates.tolist() == []
if isinstance(dates, tuple):
dates = [dates]
for start, expected in dates:
if len(expected):
expected = [Timestamp(expected)]
self.check_results(rule, start, start, expected)
def test_argument_types(self):
holidays = USThanksgivingDay.dates(self.start_date, self.end_date)
holidays_1 = USThanksgivingDay.dates(
self.start_date.strftime('%Y-%m-%d'),
self.end_date.strftime('%Y-%m-%d'))
holidays_2 = USThanksgivingDay.dates(
Timestamp(self.start_date),
Timestamp(self.end_date))
tm.assert_index_equal(holidays, holidays_1)
tm.assert_index_equal(holidays, holidays_2)
def test_special_holidays(self):
base_date = [datetime(2012, 5, 28)]
holiday_1 = Holiday('One-Time', year=2012, month=5, day=28)
holiday_2 = Holiday('Range', month=5, day=28,
start_date=datetime(2012, 1, 1),
end_date=datetime(2012, 12, 31),
offset=DateOffset(weekday=MO(1)))
assert base_date == holiday_1.dates(self.start_date, self.end_date)
assert base_date == holiday_2.dates(self.start_date, self.end_date)
def test_get_calendar(self):
class TestCalendar(AbstractHolidayCalendar):
rules = []
calendar = get_calendar('TestCalendar')
assert TestCalendar == calendar.__class__
def test_factory(self):
class_1 = HolidayCalendarFactory('MemorialDay',
AbstractHolidayCalendar,
USMemorialDay)
        class_2 = HolidayCalendarFactory('Thanksgiving',
AbstractHolidayCalendar,
USThanksgivingDay)
class_3 = HolidayCalendarFactory('Combined', class_1, class_2)
assert len(class_1.rules) == 1
assert len(class_2.rules) == 1
assert len(class_3.rules) == 2
class TestObservanceRules(object):
def setup_method(self, method):
self.we = datetime(2014, 4, 9)
self.th = datetime(2014, 4, 10)
self.fr = datetime(2014, 4, 11)
self.sa = datetime(2014, 4, 12)
self.su = datetime(2014, 4, 13)
self.mo = datetime(2014, 4, 14)
self.tu = datetime(2014, 4, 15)
def test_next_monday(self):
assert next_monday(self.sa) == self.mo
assert next_monday(self.su) == self.mo
def test_next_monday_or_tuesday(self):
assert next_monday_or_tuesday(self.sa) == self.mo
assert next_monday_or_tuesday(self.su) == self.tu
assert next_monday_or_tuesday(self.mo) == self.tu
def test_previous_friday(self):
assert previous_friday(self.sa) == self.fr
assert previous_friday(self.su) == self.fr
def test_sunday_to_monday(self):
assert sunday_to_monday(self.su) == self.mo
def test_nearest_workday(self):
assert nearest_workday(self.sa) == self.fr
assert nearest_workday(self.su) == self.mo
assert nearest_workday(self.mo) == self.mo
def test_weekend_to_monday(self):
assert weekend_to_monday(self.sa) == self.mo
assert weekend_to_monday(self.su) == self.mo
assert weekend_to_monday(self.mo) == self.mo
def test_next_workday(self):
assert next_workday(self.sa) == self.mo
assert next_workday(self.su) == self.mo
assert next_workday(self.mo) == self.tu
def test_previous_workday(self):
assert previous_workday(self.sa) == self.fr
assert previous_workday(self.su) == self.fr
assert previous_workday(self.tu) == self.mo
def test_before_nearest_workday(self):
assert before_nearest_workday(self.sa) == self.th
assert before_nearest_workday(self.su) == self.fr
assert before_nearest_workday(self.tu) == self.mo
def test_after_nearest_workday(self):
assert after_nearest_workday(self.sa) == self.mo
assert after_nearest_workday(self.su) == self.tu
assert after_nearest_workday(self.fr) == self.mo
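# Editor's note: illustrative sketch, not part of the pandas test suite.  It ties the
# observance helpers exercised above to a Holiday rule: a fixed-date holiday that
# falls on a weekend is shifted to the nearest weekday.
def _sketch_observed_holiday():
    july4 = Holiday('July 4th (observed)', month=7, day=4,
                    observance=nearest_workday)
    dates = july4.dates(datetime(2015, 1, 1), datetime(2015, 12, 31))
    # 2015-07-04 is a Saturday, so the observed date is Friday 2015-07-03
    assert list(dates) == [Timestamp('2015-07-03')]
    return dates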
class TestFederalHolidayCalendar(object):
def test_no_mlk_before_1984(self):
# see gh-10278
class MLKCalendar(AbstractHolidayCalendar):
rules = [USMartinLutherKingJr]
holidays = MLKCalendar().holidays(start='1984',
end='1988').to_pydatetime().tolist()
# Testing to make sure holiday is not incorrectly observed before 1986
assert holidays == [datetime(1986, 1, 20, 0, 0),
datetime(1987, 1, 19, 0, 0)]
def test_memorial_day(self):
class MemorialDay(AbstractHolidayCalendar):
rules = [USMemorialDay]
holidays = MemorialDay().holidays(start='1971',
end='1980').to_pydatetime().tolist()
# Fixes 5/31 error and checked manually against Wikipedia
assert holidays == [datetime(1971, 5, 31, 0, 0),
datetime(1972, 5, 29, 0, 0),
datetime(1973, 5, 28, 0, 0),
datetime(1974, 5, 27, 0, 0),
datetime(1975, 5, 26, 0, 0),
datetime(1976, 5, 31, 0, 0),
datetime(1977, 5, 30, 0, 0),
datetime(1978, 5, 29, 0, 0),
datetime(1979, 5, 28, 0, 0)]
class TestHolidayConflictingArguments(object):
def test_both_offset_observance_raises(self):
# see gh-10217
with pytest.raises(NotImplementedError):
Holiday("Cyber Monday", month=11, day=1,
offset=[DateOffset(weekday=SA(4))],
observance=next_monday)
| mit |
vortex-ape/scikit-learn | sklearn/decomposition/__init__.py | 21 | 1390 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, non_negative_factorization
from .pca import PCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'non_negative_factorization',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
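# Editor's note: illustrative sketch, not part of the scikit-learn source, showing
# the most common entry point in this package: PCA used as a dimensionality
# reduction transformer; the ``_demo`` names are invented for this example.
def _sketch_pca_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X_demo = rng.randn(100, 5)
    pca = PCA(n_components=2)
    X_reduced = pca.fit_transform(X_demo)
    # X_reduced has shape (100, 2); one explained_variance_ratio_ entry per component
    return X_reduced, pca.explained_variance_ratio_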
| bsd-3-clause |
amdouglas/OpenPNM | OpenPNM/Network/__GenericNetwork__.py | 1 | 40997 | # -*- coding: utf-8 -*-
"""
===============================================================================
GenericNetwork: Abstract class to construct pore networks
===============================================================================
"""
import scipy as sp
import scipy.sparse as sprs
import scipy.spatial as sptl
import OpenPNM.Utilities.misc as misc
from OpenPNM.Utilities import topology
from OpenPNM.Base import Core, Controller, Tools, logging
logger = logging.getLogger(__name__)
ctrl = Controller()
topo = topology()
class GenericNetwork(Core):
r"""
GenericNetwork - Base class to construct pore networks
Parameters
----------
name : string
Unique name for Network object
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
logger.name = self.name
# Initialize adjacency and incidence matrix dictionaries
self._incidence_matrix = {}
self._adjacency_matrix = {}
def __setitem__(self, prop, value):
if prop == 'throat.conns':
if sp.shape(value)[1] != 2:
logger.error('Wrong size for throat conns!')
else:
mask = value[:, 0] > value[:, 1]
if mask.any():
logger.debug('The first column in (throat.conns) should be \
smaller than the second one.')
v1 = sp.copy(value[:, 0][mask])
v2 = sp.copy(value[:, 1][mask])
value[:, 0][mask] = v2
value[:, 1][mask] = v1
for geom in self._geometries:
if (prop in geom.keys()) and ('all' not in prop.split('.')):
logger.error(prop + ' is already defined in at least one associated \
Geometry object')
return
super().__setitem__(prop, value)
def __getitem__(self, key):
if key.split('.')[-1] == self.name:
element = key.split('.')[0]
return self[element+'.all']
if key not in self.keys():
logger.debug(key + ' not on Network, constructing data from Geometries')
return self._interleave_data(key, self.geometries())
else:
return super().__getitem__(key)
def _set_net(self, network):
pass
def _get_net(self):
return self
_net = property(fset=_set_net, fget=_get_net)
def create_adjacency_matrix(self, data=None, sprsfmt='coo',
dropzeros=True, sym=True):
r"""
Generates a weighted adjacency matrix in the desired sparse format
Parameters
----------
data : array_like, optional
An array containing the throat values to enter into the matrix (in
graph theory these are known as the 'weights'). If omitted, ones
are used to create a standard adjacency matrix representing
connectivity only.
sprsfmt : string, optional
The sparse storage format to return. Options are:
* 'coo' : (default) This is the native format of OpenPNM data
* 'lil' : Enables row-wise slice of data
* 'csr' : Favored by most linear algebra routines
dropzeros : boolean, optional
Remove 0 elements from the values, instead of creating 0-weighted
links, the default is True.
sym : Boolean, optional
Makes the matrix symmetric about the diagonal, the default is true.
Returns
-------
Returns an adjacency matrix in the specified Scipy sparse format
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.TestNet()
>>> vals = sp.rand(pn.num_throats(),) < 0.5
>>> temp = pn.create_adjacency_matrix(data=vals, sprsfmt='csr')
"""
logger.debug('create_adjacency_matrix: Start of method')
Np = self.num_pores()
Nt = self.num_throats()
# Check if provided data is valid
if data is None:
data = sp.ones((self.num_throats(),))
elif sp.shape(data)[0] != Nt:
raise Exception('Received dataset of incorrect length')
# Clear any zero-weighted connections
if dropzeros:
ind = data > 0
else:
ind = sp.ones_like(data, dtype=bool)
# Get connectivity info from network
conn = self['throat.conns'][ind]
row = conn[:, 0]
col = conn[:, 1]
data = data[ind]
# Append row & col to each other, and data to itself
if sym:
row = sp.append(row, conn[:, 1])
col = sp.append(col, conn[:, 0])
data = sp.append(data, data)
# Generate sparse adjacency matrix in 'coo' format
temp = sprs.coo_matrix((data, (row, col)), (Np, Np))
# Convert to requested format
if sprsfmt == 'coo':
pass # temp is already in coo format
if sprsfmt == 'csr':
temp = temp.tocsr()
if sprsfmt == 'lil':
temp = temp.tolil()
logger.debug('create_adjacency_matrix: End of method')
return temp
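    # Editor's note: the static method below is an illustrative sketch added for
    # exposition; it is not part of OpenPNM.  It shows the sparse construction used
    # in create_adjacency_matrix: throat 'conns' pairs become the (row, col) indices
    # of a scipy.sparse COO matrix, appended once in reverse order to make the
    # matrix symmetric.
    @staticmethod
    def _sketch_adjacency_from_conns():
        import scipy as sp
        import scipy.sparse as sprs
        conns = sp.array([[0, 1], [1, 2]])   # two throats linking three pores
        weights = sp.ones(2)
        row = sp.append(conns[:, 0], conns[:, 1])
        col = sp.append(conns[:, 1], conns[:, 0])
        data = sp.append(weights, weights)
        adj = sprs.coo_matrix((data, (row, col)), shape=(3, 3))
        # adj.todense() is the symmetric connectivity matrix of a 3-pore chain
        return adj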
def create_incidence_matrix(self, data=None, sprsfmt='coo', dropzeros=True):
r"""
Creates an incidence matrix filled with supplied throat values
Parameters
----------
data : array_like, optional
An array containing the throat values to enter into the matrix (In
graph theory these are known as the 'weights'). If omitted, ones
are used to create a standard incidence matrix representing
connectivity only.
sprsfmt : string, optional
The sparse storage format to return. Options are:
            * 'coo' : (default) This is the native format of OpenPNM data
* 'lil' : Enables row-wise slice of data
* 'csr' : Favored by most linear algebra routines
dropzeros : Boolean, optional
Remove 0 elements from the values instead of creating 0-weighted
links. The default is True.
Returns
-------
An incidence matrix (a cousin to the adjacency matrix, useful for
finding the throats of a given pore)
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.TestNet()
>>> vals = sp.rand(pn.num_throats(),) < 0.5
>>> temp = pn.create_incidence_matrix(data=vals,sprsfmt='csr')
"""
logger.debug('create_incidence_matrix: Start of method')
Nt = self.num_throats()
Np = self.num_pores()
# Check if provided data is valid
if data is None:
data = sp.ones((self.num_throats(),))
elif sp.shape(data)[0] != Nt:
raise Exception('Received dataset of incorrect length')
if dropzeros:
ind = data > 0
else:
ind = sp.ones_like(data, dtype=bool)
conn = self['throat.conns'][ind]
row = conn[:, 0]
row = sp.append(row, conn[:, 1])
col = self.throats('all')[ind]
col = sp.append(col, col)
data = sp.append(data[ind], data[ind])
temp = sprs.coo.coo_matrix((data, (row, col)), (Np, Nt))
# Convert to requested format
if sprsfmt == 'coo':
pass # temp is already in coo format
if sprsfmt == 'csr':
temp = temp.tocsr()
if sprsfmt == 'lil':
temp = temp.tolil()
logger.debug('create_incidence_matrix: End of method')
return temp
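# --- illustrative sketch (not part of OpenPNM) ------------------------------
# The incidence matrix relates pores (rows) to throats (columns): entry (p, t)
# is non-zero when pore p is one end of throat t. A standalone version of that
# construction, with assumed names, might look like:
import numpy as np
from scipy import sparse
conns = np.array([[0, 1], [1, 2], [2, 3]])               # one row per throat
n_pores, n_throats = 4, conns.shape[0]
throat_ids = np.arange(n_throats)
row = np.concatenate([conns[:, 0], conns[:, 1]])          # both end pores
col = np.concatenate([throat_ids, throat_ids])            # of every throat
dat = np.ones(2 * n_throats)
inc = sparse.coo_matrix((dat, (row, col)), shape=(n_pores, n_throats))
# inc.tolil().rows[p] then lists the throats touching pore p, which is how the
# cached 'lil' matrix is used by find_neighbor_throats() further below
# ----------------------------------------------------------------------------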
def find_connected_pores(self, throats=[], flatten=False):
r"""
Return a list of pores connected to the given list of throats
Parameters
----------
throats : array_like
List of throat numbers
flatten : boolean, optional
If flatten is True a 1D array of unique pore numbers is returned.
If flatten is False (default) each location in the returned array
contains a sub-array of neighboring pores for each input throat,
in the order they were sent.
Returns
-------
1D array (if flatten is True) or ndarray of arrays (if flatten is False)
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.TestNet()
>>> pn.find_connected_pores(throats=[0,1])
array([[0, 1],
[0, 5]])
>>> pn.find_connected_pores(throats=[0,1], flatten=True)
array([0, 1, 5])
"""
Ts = sp.array(throats, ndmin=1)
if Ts.dtype == bool:
Ts = self.toindices(Ts)
if sp.size(Ts) == 0:
return sp.ndarray([0, 2], dtype=int)
Ps = self['throat.conns'][Ts]
if flatten:
Ps = sp.unique(sp.hstack(Ps))
return Ps
def find_connecting_throat(self, P1, P2):
r"""
Return the throat number connecting pairs of pores
Parameters
----------
P1 , P2 : array_like
The pore numbers whose throats are sought. These can be vectors
of pore numbers, but must be the same length
Returns
-------
Tnum : list of list of int
Returns throat number(s), or empty array if pores are not connected
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.TestNet()
>>> pn.find_connecting_throat([0, 1, 2], [2, 2, 2])
[[], [3], []]
TODO: This now works on 'vector' inputs, but is not actually vectorized
in the Numpy sense, so could be slow with large P1,P2 inputs
"""
P1 = sp.array(P1, ndmin=1)
P2 = sp.array(P2, ndmin=1)
Ts1 = self.find_neighbor_throats(P1, flatten=False)
Ts2 = self.find_neighbor_throats(P2, flatten=False)
Ts = []
for row in range(0, len(P1)):
if P1[row] == P2[row]:
throat = []
else:
throat = sp.intersect1d(Ts1[row], Ts2[row]).tolist()
Ts.insert(0, throat)
Ts.reverse()
return Ts
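# --- illustrative sketch (not part of OpenPNM) ------------------------------
# Regarding the TODO above: one way to avoid the per-row neighbour searches is
# to hash every sorted pore pair once and then look pairs up directly. This is
# only a sketch under assumed names (`conns`, `P1`, `P2`), not the library API.
import numpy as np
conns = np.array([[0, 1], [0, 5], [1, 2], [1, 6]])        # throat.conns-like
pair_to_throat = {tuple(sorted(pair)): t for t, pair in enumerate(conns)}
P1 = np.array([0, 1, 2])
P2 = np.array([2, 2, 2])
Ts = [pair_to_throat.get(tuple(sorted(p))) for p in zip(P1, P2)]
# -> [None, 2, None]: only pores 1 and 2 share a throat in this toy network
# ----------------------------------------------------------------------------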
def find_neighbor_pores(self, pores, mode='union', flatten=True, excl_self=True):
r"""
Returns a list of pores neighboring the given pore(s)
Parameters
----------
pores : array_like
ID numbers of pores whose neighbors are sought.
flatten : boolean, optional
If flatten is True a 1D array of unique pore ID numbers is
returned. If flatten is False the returned array contains arrays
of neighboring pores for each input pore, in the order they were
sent.
excl_self : bool, optional (default is True)
If this is True then the input pores are not included in the
returned list. This option only applies when input pores
are in fact neighbors to each other, otherwise they are not
part of the returned list anyway.
mode : string, optional
Specifies which neighbors should be returned. The options are:
* 'union' : All neighbors of the input pores
* 'intersection' : Only neighbors shared by all input pores
* 'not_intersection' : Only neighbors not shared by any input pores
Returns
-------
neighborPs : 1D array (if flatten is True) or ndarray of ndarrays (if
flatten is False)
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.TestNet()
>>> pn.find_neighbor_pores(pores=[0, 2])
array([ 1, 3, 5, 7, 25, 27])
>>> pn.find_neighbor_pores(pores=[0, 1])
array([ 2, 5, 6, 25, 26])
>>> pn.find_neighbor_pores(pores=[0, 1], mode='union', excl_self=False)
array([ 0, 1, 2, 5, 6, 25, 26])
>>> pn.find_neighbor_pores(pores=[0, 2],flatten=False)
array([array([ 1, 5, 25]), array([ 1, 3, 7, 27])], dtype=object)
>>> pn.find_neighbor_pores(pores=[0, 2],mode='intersection')
array([1])
>>> pn.find_neighbor_pores(pores=[0, 2],mode='not_intersection')
array([ 3, 5, 7, 25, 27])
"""
pores = sp.array(pores, ndmin=1)
if pores.dtype == bool:
pores = self.toindices(pores)
if sp.size(pores) == 0:
return sp.array([], ndmin=1, dtype=int)
# Test for existence of incidence matrix
try:
neighborPs = self._adjacency_matrix['lil'].rows[[pores]]
except:
temp = self.create_adjacency_matrix(sprsfmt='lil')
self._adjacency_matrix['lil'] = temp
neighborPs = self._adjacency_matrix['lil'].rows[[pores]]
if [sp.asarray(x) for x in neighborPs if x] == []:
return sp.array([], ndmin=1)
if flatten:
# All the empty lists must be removed to maintain data type after
# hstack (numpy bug?)
neighborPs = [sp.asarray(x) for x in neighborPs if x]
neighborPs = sp.hstack(neighborPs)
neighborPs = sp.concatenate((neighborPs, pores))
# Remove references to input pores and duplicates
if mode == 'not_intersection':
neighborPs = sp.array(sp.unique(sp.where(
sp.bincount(neighborPs) == 1)[0]), dtype=int)
elif mode == 'union':
neighborPs = sp.array(sp.unique(neighborPs), int)
elif mode == 'intersection':
neighborPs = sp.array(sp.unique(sp.where(
sp.bincount(neighborPs) > 1)[0]), dtype=int)
if excl_self:
neighborPs = neighborPs[~sp.in1d(neighborPs, pores)]
else:
for i in range(0, sp.size(pores)):
neighborPs[i] = sp.array(neighborPs[i], dtype=int)
return sp.array(neighborPs, ndmin=1)
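# --- illustrative sketch (not part of OpenPNM) ------------------------------
# The three modes above boil down to a bincount over the pooled neighbour
# lists: values seen once neighbour only one input pore, values seen more than
# once are shared. A simplified standalone version with assumed names:
import numpy as np
pooled = np.array([1, 5, 25, 1, 3, 7, 27])    # neighbours of pores 0 and 2
counts = np.bincount(pooled)
union = np.unique(pooled)                      # [ 1  3  5  7 25 27]
intersection = np.where(counts > 1)[0]         # [1]
not_intersection = np.where(counts == 1)[0]    # [ 3  5  7 25 27]
# ----------------------------------------------------------------------------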
def find_neighbor_throats(self, pores, mode='union', flatten=True):
r"""
Returns a list of throats neighboring the given pore(s)
Parameters
----------
pores : array_like
Indices of pores whose neighbors are sought
flatten : boolean, optional
If flatten is True (default) a 1D array of unique throat ID numbers
is returned. If flatten is False the returned array contains arrays
of neighboring throat ID numbers for each input pore, in the order
they were sent.
mode : string, optional
Specifies which neighbors should be returned. The options are:
* 'union' : All neighbors of the input pores
* 'intersection' : Only neighbors shared by all input pores
* 'not_intersection' : Only neighbors not shared by any input pores
Returns
-------
neighborTs : 1D array (if flatten is True) or ndarray of arrays (if
flatten is False)
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.TestNet()
>>> pn.find_neighbor_throats(pores=[0, 1])
array([0, 1, 2, 3, 4, 5])
>>> pn.find_neighbor_throats(pores=[0, 1],flatten=False)
array([array([0, 1, 2]), array([0, 3, 4, 5])], dtype=object)
"""
pores = sp.array(pores, ndmin=1)
if pores.dtype == bool:
pores = self.toindices(pores)
if sp.size(pores) == 0:
return sp.array([], ndmin=1, dtype=int)
# Test for existence of incidence matrix
try:
neighborTs = self._incidence_matrix['lil'].rows[[pores]]
except:
temp = self.create_incidence_matrix(sprsfmt='lil')
self._incidence_matrix['lil'] = temp
neighborTs = self._incidence_matrix['lil'].rows[[pores]]
if [sp.asarray(x) for x in neighborTs if x] == []:
return sp.array([], ndmin=1)
if flatten:
# All the empty lists must be removed to maintain data type after
# hstack (numpy bug?)
neighborTs = [sp.asarray(x) for x in neighborTs if x]
neighborTs = sp.hstack(neighborTs)
# Remove references to input pores and duplicates
if mode == 'not_intersection':
neighborTs = sp.unique(sp.where(sp.bincount(neighborTs) == 1)[0])
elif mode == 'union':
neighborTs = sp.unique(neighborTs)
elif mode == 'intersection':
neighborTs = sp.unique(sp.where(sp.bincount(neighborTs) > 1)[0])
else:
for i in range(0, sp.size(pores)):
neighborTs[i] = sp.array(neighborTs[i])
return sp.array(neighborTs, ndmin=1)
def num_neighbors(self, pores, flatten=False):
r"""
Returns an ndarray containing the number of neighbor pores for each
element in pores
Parameters
----------
pores : array_like
Pores whose neighbors are to be counted
flatten : boolean (optional)
If False (default) the number of pore neighbors for each input pore is
returned as an array. If True the sum total number of unique
neighbors is counted, not including the input pores even if they
neighbor each other.
Returns
-------
num_neighbors : 1D array with number of neighbors in each element
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.TestNet()
>>> pn.num_neighbors(pores=[0, 1], flatten=False)
array([3, 4])
>>> pn.num_neighbors(pores=[0, 1], flatten=True)
5
>>> pn.num_neighbors(pores=[0, 2], flatten=True)
6
"""
pores = sp.array(pores, ndmin=1)
if pores.dtype == bool:
pores = self.toindices(pores)
if sp.size(pores) == 0:
return sp.array([], ndmin=1, dtype=int)
# Count number of neighbors
if flatten:
neighborPs = self.find_neighbor_pores(pores,
flatten=True,
mode='union',
excl_self=True)
num = sp.shape(neighborPs)[0]
else:
neighborPs = self.find_neighbor_pores(pores, flatten=False)
num = sp.zeros(sp.shape(neighborPs), dtype=int)
for i in range(0, sp.shape(num)[0]):
num[i] = sp.size(neighborPs[i])
return num
def find_interface_throats(self, labels=[]):
r"""
Finds the throats that join two pore labels.
Parameters
----------
labels : list of strings
The labels of the two pore groups whose interface is sought
Returns
-------
An array of throat numbers that connect the given pore groups
Notes
-----
This method is meant to find interfaces between TWO groups, regions or
clusters of pores (as defined by their label). If the input labels
overlap or are not adjacent, an empty array is returned.
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.TestNet()
>>> pn['pore.domain1'] = False
>>> pn['pore.domain2'] = False
>>> pn['pore.domain1'][[0, 1, 2]] = True
>>> pn['pore.domain2'][[5, 6, 7]] = True
>>> pn.find_interface_throats(labels=['domain1', 'domain2'])
array([1, 4, 7])
TODO: It might be a good idea to allow overlapping regions
"""
Tind = sp.array([], ndmin=1)
if sp.shape(labels)[0] != 2:
logger.error('Exactly two labels must be given')
pass
else:
P1 = self.pores(labels=labels[0])
P2 = self.pores(labels=labels[1])
# Check if labels overlap
if sp.sum(sp.in1d(P1, P2)) > 0:
logger.error('Some labels overlap, interface cannot be found')
pass
else:
T1 = self.find_neighbor_throats(P1)
T2 = self.find_neighbor_throats(P2)
Tmask = sp.in1d(T1, T2)
Tind = T1[Tmask]
return Tind
def find_clusters(self, mask=[]):
r"""
Identify connected clusters of pores in the network.
Parameters
----------
mask : array_like, boolean
A list of active nodes. This method will automatically search
for clusters based on site or bond connectivity depending on
whether the received mask is Np or Nt long.
Returns
-------
clusters : array_like
An Np long list of cluster numbers
"""
if sp.size(mask) == self.num_throats():
# Convert to boolean mask if not already
temp = sp.zeros((self.num_throats(),), dtype=bool)
temp[mask] = True
elif sp.size(mask) == self.num_pores():
conns = self.find_connected_pores(throats=self.throats())
conns[:, 0] = mask[conns[:, 0]]
conns[:, 1] = mask[conns[:, 1]]
temp = sp.array(conns[:, 0]*conns[:, 1], dtype=bool)
else:
raise Exception('Mask received was neither Nt nor Np long')
temp = self.create_adjacency_matrix(data=temp,
sprsfmt='csr',
dropzeros=True)
clusters = sprs.csgraph.connected_components(csgraph=temp,
directed=False)[1]
return clusters
def find_clusters2(self, mask=[], t_labels=False):
r"""
Identify connected clusters of pores in the network. This method can
also return a list of throat labels, which correspond to the pore
labels to which the throat is connected. Either site or bond
percolation can be considered; see the description of the input
arguments for details.
Parameters
----------
mask : array_like, boolean
A list of active bonds or sites (throats or pores). If the mask is
Np long, then the method will perform a site percolation, while if
the mask is Nt long bond percolation will be performed.
t_labels : boolean (default is False)
Indicates if throat cluster labels should also be returned. If true
then a tuple containing both p_clusters and t_clusters is returned.
Returns
-------
A Np long list of pore clusters numbers, unless t_labels is True in
which case a tuple containing both pore and throat cluster labels is
returned. The label numbers correspond such that pores and throats
with the same label are part of the same cluster.
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.Cubic(shape=[25, 25, 1])
>>> geom = OpenPNM.Geometry.GenericGeometry(network=pn,
... pores=pn.Ps,
... throats=pn.Ts)
>>> geom['pore.seed'] = sp.rand(pn.Np)
>>> geom['throat.seed'] = sp.rand(pn.Nt)
Bond percolation is achieved by sending a list of invaded throats:
>>> (p_bond,t_bond) = pn.find_clusters2(mask=geom['throat.seed'] < 0.3,
... t_labels=True)
Site percolation is achieved by sending a list of invaded pores:
>>> (p_site,t_site) = pn.find_clusters2(mask=geom['pore.seed'] < 0.3,
... t_labels=True)
To visualize the invasion pattern, use matplotlib's imshow method
along with the Cubic Network's asarray method which converts list based
data to square arrays:
.. code-block:: python
import matplotlib.pyplot as plt
im_bond = pn.asarray(p_bond)[:, :, 0]
im_site = pn.asarray(p_site)[:, :, 0]
plt.subplot(1, 2, 1)
plt.imshow(im_site, interpolation='none')
plt.subplot(1, 2, 2)
plt.imshow(im_bond, interpolation='none')
"""
# Parse the input arguments
mask = sp.array(mask, ndmin=1)
if mask.dtype != bool:
raise Exception('Mask must be a boolean array of Np or Nt length')
# If pore mask was given perform site percolation
if sp.size(mask) == self.Np:
(p_clusters, t_clusters) = self._site_percolation(mask)
# If throat mask was given perform bond percolation
elif sp.size(mask) == self.Nt:
(p_clusters, t_clusters) = self._bond_percolation(mask)
else:
raise Exception('Mask received was neither Nt nor Np long')
if t_labels:
return (p_clusters, t_clusters)
else:
return p_clusters
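# --- illustrative sketch (not part of OpenPNM) ------------------------------
# Both percolation paths below feed a masked adjacency matrix into scipy's
# connected_components. A standalone 4-pore chain with the middle throat
# removed (assumed names) shows the idea:
import numpy as np
from scipy import sparse
from scipy.sparse import csgraph
conns = np.array([[0, 1], [1, 2], [2, 3]])     # three throats in a chain
tmask = np.array([True, False, True])          # middle throat not invaded
keep = conns[tmask]
adj = sparse.coo_matrix((np.ones(len(keep)), (keep[:, 0], keep[:, 1])),
                        shape=(4, 4))
n_clusters, labels = csgraph.connected_components(adj, directed=False)
# labels -> [0, 0, 1, 1]: pores 0-1 and 2-3 end up in two separate clusters
# ----------------------------------------------------------------------------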
def _site_percolation(self, pmask):
r"""
"""
# Find throats that produce site percolation
conns = sp.copy(self['throat.conns'])
conns[:, 0] = pmask[conns[:, 0]]
conns[:, 1] = pmask[conns[:, 1]]
# Only if both pores are True is the throat set to True
tmask = sp.all(conns, axis=1)
# Perform the clustering using scipy.csgraph
csr = self.create_adjacency_matrix(data=tmask,
sprsfmt='csr',
dropzeros=True)
clusters = sprs.csgraph.connected_components(csgraph=csr,
directed=False)[1]
# Adjust cluster numbers such that non-invaded pores are labelled -1
# Note: The following line also takes care of assigning cluster numbers
# to single isolated invaded pores
p_clusters = (clusters + 1)*(pmask) - 1
# Label invaded throats with their neighboring pore's label
t_clusters = clusters[self['throat.conns']]
ind = (t_clusters[:, 0] == t_clusters[:, 1])
t_clusters = t_clusters[:, 0]
# Label non-invaded throats with -1
t_clusters[~ind] = -1
return (p_clusters, t_clusters)
def _bond_percolation(self, tmask):
r"""
"""
# Perform the clustering using scipy.csgraph
csr = self.create_adjacency_matrix(data=tmask,
sprsfmt='csr',
dropzeros=True)
clusters = sprs.csgraph.connected_components(csgraph=csr,
directed=False)[1]
# Convert clusters to a more usable output:
# Find pores attached to each invaded throats
Ps = self.find_connected_pores(throats=tmask, flatten=True)
# Adjust cluster numbers such that non-invaded pores are labelled -1
p_clusters = (clusters + 1)*(self.tomask(pores=Ps).astype(int)) - 1
# Label invaded throats with their neighboring pore's label
t_clusters = clusters[self['throat.conns']][:, 0]
# Label non-invaded throats with -1
t_clusters[~tmask] = -1
return (p_clusters, t_clusters)
def find_nearby_pores(self, pores, distance, flatten=False, excl_self=True):
r"""
Find all pores within a given radial distance of the input pore(s)
regardless of whether or not they are topologically connected.
Parameters
----------
pores : array_like
The list of pores for whom nearby neighbors are to be found
distance : scalar
The maximum distance within which the nearby pores should be found
excl_self : bool
Controls whether the input pores should be included in the returned
list. The default is True which means they are not included.
flatten : bool, optional
If True a single flat list of all unique nearby pores is returned.
If False (default) an array of sub-arrays is returned, with one
entry of nearby pores per input pore.
Returns
-------
A list of pores which are within the given spatial distance. If a
list of N pores is supplied, then an N-long list of such lists is
returned. The returned lists each contain the pore for which the
neighbors were sought.
Examples
--------
>>> import OpenPNM
>>> pn = OpenPNM.Network.TestNet()
>>> pn.find_nearby_pores(pores=[0, 1], distance=1)
array([array([ 1, 5, 25]), array([ 0, 2, 6, 26])], dtype=object)
>>> pn.find_nearby_pores(pores=[0, 1], distance=0.5)
array([], shape=(2, 0), dtype=int64)
"""
# Convert to ND-array
pores = sp.array(pores, ndmin=1)
# Convert boolean mask to indices if necessary
if pores.dtype == bool:
pores = self.Ps[pores]
# Handle an empty array if given
if sp.size(pores) == 0:
return sp.array([], dtype=sp.int64)
if distance <= 0:
logger.error('Provided distances should be greater than 0')
if flatten:
Pn = sp.array([])
else:
Pn = sp.array([sp.array([]) for i in range(0, len(pores))])
return Pn.astype(sp.int64)
# Create kdTree objects
kd = sptl.cKDTree(self['pore.coords'])
kd_pores = sptl.cKDTree(self['pore.coords'][pores])
# Perform search
Pn = kd_pores.query_ball_tree(kd, r=distance)
# Sort the indices in each list
[Pn[i].sort() for i in range(0, sp.size(pores))]
if flatten: # Convert list of lists to a flat nd-array
temp = []
[temp.extend(Ps) for Ps in Pn]
Pn = sp.unique(temp)
if excl_self: # Remove inputs if necessary
Pn = Pn[~sp.in1d(Pn, pores)]
else: # Convert list of lists to an nd-array of nd-arrays
if excl_self: # Remove inputs if necessary
[Pn[i].remove(pores[i]) for i in range(0, sp.size(pores))]
temp = []
[temp.append(sp.array(Pn[i])) for i in range(0, sp.size(pores))]
Pn = sp.array(temp)
if Pn.dtype == float:
Pn = Pn.astype(sp.int64)
return Pn
def extend(self, pore_coords=[], throat_conns=[], labels=[]):
topo.extend(network=self, pore_coords=pore_coords,
throat_conns=throat_conns, labels=labels)
extend.__doc__ = topo.extend.__doc__
def trim(self, pores=[], throats=[]):
topo.trim(network=self, pores=pores, throats=throats)
trim.__doc__ = topo.trim.__doc__
def clone_pores(self, pores, apply_label=['clone'], mode='parents'):
topo.clone_pores(network=self, pores=pores,
apply_label=apply_label, mode=mode)
clone_pores.__doc__ = topo.clone_pores.__doc__
def stitch(self, donor, P_donor, P_network, method, len_max=sp.inf,
label_suffix=''):
topo.stitch(network=self, donor=donor, P_donor=P_donor,
P_network=P_network, method=method, len_max=len_max,
label_suffix=label_suffix)
stitch.__doc__ = topo.stitch.__doc__
def connect_pores(self, pores1, pores2, labels=[]):
topo.connect_pores(network=self,
pores1=pores1,
pores2=pores2,
labels=labels)
connect_pores.__doc__ = topo.connect_pores.__doc__
def check_network_health(self):
r"""
This method checks the network's topological health by checking for:
(1) Isolated pores
(2) Islands or isolated clusters of pores
(3) Duplicate throats
(4) Bidirectional throats (ie. symmetrical adjacency matrix)
Returns
-------
A dictionary containing the offending pores or throat numbers under
each named key.
It also returns a list of which pores and throats should be trimmed
from the network to restore health. This list is a suggestion only,
and is based on keeping the largest cluster and trimming the others.
Notes
-----
- Does not yet check for duplicate pores
- Does not yet suggest which throats to remove
- This is just a 'check' method and does not 'fix' the problems it finds
"""
health = Tools.HealthDict()
health['disconnected_clusters'] = []
health['isolated_pores'] = []
health['trim_pores'] = []
health['duplicate_throats'] = []
health['bidirectional_throats'] = []
# Check for individual isolated pores
Ps = self.num_neighbors(self.pores())
if sp.sum(Ps == 0) > 0:
logger.warning(str(sp.sum(Ps == 0)) + ' pores have no neighbors')
health['isolated_pores'] = sp.where(Ps == 0)[0]
# Check for separated clusters of pores
temp = []
Cs = self.find_clusters(self.tomask(throats=self.throats('all')))
if sp.shape(sp.unique(Cs))[0] > 1:
logger.warning('Isolated clusters exist in the network')
for i in sp.unique(Cs):
temp.append(sp.where(Cs == i)[0])
b = sp.array([len(item) for item in temp])
c = sp.argsort(b)[::-1]
for i in range(0, len(c)):
health['disconnected_clusters'].append(temp[c[i]])
if i > 0:
health['trim_pores'].extend(temp[c[i]])
# Check for duplicate throats
i = self['throat.conns'][:, 0]
j = self['throat.conns'][:, 1]
v = sp.array(self['throat.all'], dtype=int)
Np = self.num_pores()
adjmat = sprs.coo_matrix((v, (i, j)), [Np, Np])
temp = adjmat.tolil() # Convert to lil to combine duplicates
# Compile lists of which specific throats are duplicates
# Be VERY careful here, as throats are not in order
mergeTs = []
for i in range(0, self.Np):
if sp.any(sp.array(temp.data[i]) > 1):
ind = sp.where(sp.array(temp.data[i]) > 1)[0]
P = sp.array(temp.rows[i])[ind]
Ts = self.find_connecting_throat(P1=i, P2=P)[0]
mergeTs.append(Ts)
health['duplicate_throats'] = mergeTs
# Check for bidirectional throats
num_full = adjmat.sum()
temp = sprs.triu(adjmat, k=1)
num_upper = temp.sum()
if num_full > num_upper:
biTs = sp.where(self['throat.conns'][:, 0] >
self['throat.conns'][:, 1])[0]
health['bidirectional_throats'] = biTs.tolist()
return health
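# --- illustrative sketch (not part of OpenPNM) ------------------------------
# The duplicate and bidirectional checks above can also be phrased directly on
# the conns array; a compact standalone version with assumed names:
import numpy as np
conns = np.array([[0, 1], [1, 0], [1, 2], [0, 2]])        # toy throat.conns
bidirectional = np.where(conns[:, 0] > conns[:, 1])[0]    # -> [1]
ordered = np.sort(conns, axis=1)                          # canonical pore order
_, first, counts = np.unique(ordered, axis=0,
                             return_index=True, return_counts=True)
duplicates = first[counts > 1]    # -> [0]: the (0, 1) pair appears twice
# (rows 0 and 1), so one of those two throats is a candidate for merging
# ----------------------------------------------------------------------------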
def check_geometry_health(self):
r"""
Perform a check to find pores with overlapping or undefined Geometries
"""
geoms = self.geometries()
Ptemp = sp.zeros((self.Np,))
Ttemp = sp.zeros((self.Nt,))
for item in geoms:
Pind = self['pore.'+item]
Tind = self['throat.'+item]
Ptemp[Pind] = Ptemp[Pind] + 1
Ttemp[Tind] = Ttemp[Tind] + 1
health = Tools.HealthDict()
health['overlapping_pores'] = sp.where(Ptemp > 1)[0].tolist()
health['undefined_pores'] = sp.where(Ptemp == 0)[0].tolist()
health['overlapping_throats'] = sp.where(Ttemp > 1)[0].tolist()
health['undefined_throats'] = sp.where(Ttemp == 0)[0].tolist()
return health
def _update_network(self, mode='clear'):
r"""
Regenerates the adjacency and incidence matrices
Parameters
----------
mode : string
Controls the extent of the update. Options are:
- 'clear' : Removes existing adjacency and incidence matrices
- 'regenerate' : Removes the existing matrices and regenerates new ones.
Notes
-----
The 'regenerate' mode is more time consuming, so repeated calls to
this function (ie. during network merges, and adding boundaries)
should use the 'clear' mode. The other methods that require these
matrices will generate them as needed, so this pushes the 'generation'
time to 'on demand'.
"""
logger.debug('Resetting adjacency and incidence matrices')
self._adjacency_matrix['coo'] = {}
self._adjacency_matrix['csr'] = {}
self._adjacency_matrix['lil'] = {}
self._incidence_matrix['coo'] = {}
self._incidence_matrix['csr'] = {}
self._incidence_matrix['lil'] = {}
if mode == 'regenerate':
self._adjacency_matrix['coo'] = \
self.create_adjacency_matrix(sprsfmt='coo')
self._adjacency_matrix['csr'] = \
self.create_adjacency_matrix(sprsfmt='csr')
self._adjacency_matrix['lil'] = \
self.create_adjacency_matrix(sprsfmt='lil')
self._incidence_matrix['coo'] = \
self.create_incidence_matrix(sprsfmt='coo')
self._incidence_matrix['csr'] = \
self.create_incidence_matrix(sprsfmt='csr')
self._incidence_matrix['lil'] = \
self.create_incidence_matrix(sprsfmt='lil')
def domain_bulk_volume(self):
raise NotImplementedError()
def domain_pore_volume(self):
raise NotImplementedError()
def domain_length(self, face_1, face_2):
r"""
Calculate the distance between two faces of the network
Parameters
----------
face_1 and face_2 : array_like
Lists of pores belonging to opposite faces of the network
Returns
-------
The length of the domain in the specified direction
Notes
-----
- Does not yet check if input faces are perpendicular to each other
"""
# Ensure given points are coplanar before proceeding
if misc.iscoplanar(self['pore.coords'][face_1]) and \
misc.iscoplanar(self['pore.coords'][face_2]):
# Find distance between given faces
x = self['pore.coords'][face_1]
y = self['pore.coords'][face_2]
Ds = misc.dist(x, y)
L = sp.median(sp.amin(Ds, axis=0))
else:
logger.warning('The supplied pores are not coplanar. Length will be \
approximate.')
f1 = self['pore.coords'][face_1]
f2 = self['pore.coords'][face_2]
distavg = [0, 0, 0]
distavg[0] = sp.absolute(sp.average(f1[:, 0])-sp.average(f2[:, 0]))
distavg[1] = sp.absolute(sp.average(f1[:, 1])-sp.average(f2[:, 1]))
distavg[2] = sp.absolute(sp.average(f1[:, 2])-sp.average(f2[:, 2]))
L = max(distavg)
return L
def domain_area(self, face):
r"""
Calculate the area of a given network face
Parameters
----------
face : array_like
List of pores defining the face of interest
Returns
-------
The area of the specified face
"""
coords = self['pore.coords'][face]
rads = self['pore.diameter'][face] / 2.
# Calculate the area of the 3 principal faces of the bounding cuboid
dx = max(coords[:, 0]+rads) - min(coords[:, 0] - rads)
dy = max(coords[:, 1]+rads) - min(coords[:, 1] - rads)
dz = max(coords[:, 2]+rads) - min(coords[:, 2] - rads)
yz = dy*dz # x normal
xz = dx*dz # y normal
xy = dx*dy # z normal
# Find the directions parallel to the plane
directions = sp.where(sp.array([yz, xz, xy]) != max([yz, xz, xy]))[0]
try:
# Use the whole network to do the area calculation
coords = self['pore.coords']
rads = self['pore.diameter']/2.
d0 = max(coords[:, directions[0]] + rads) - \
min(coords[:, directions[0]] - rads)
d1 = max(coords[:, directions[1]] + rads) - \
min(coords[:, directions[1]] - rads)
A = d0*d1
except:
# If that fails, use the max face area of the bounding cuboid
A = max([yz, xz, xy])
if not misc.iscoplanar(self['pore.coords'][face]):
logger.warning('The supplied pores are not coplanar. Area will be'
' approximate')
pass
return A
def _compress_labels(self, label_array):
# Make cluster number contiguous
array = sp.array(label_array)
if array.dtype != int:
raise Exception('label_array must be integers')
min_val = sp.amin(array)
if min_val >= 0:
min_val = 0
array = array + sp.absolute(min_val)
nums = sp.unique(array)
temp = sp.zeros((sp.amax(array)+1,))
temp[nums] = sp.arange(0, sp.size(nums))
array = temp[array].astype(array.dtype)
return array
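# --- illustrative sketch (not part of OpenPNM) ------------------------------
# _compress_labels re-maps arbitrary integer labels onto 0..n-1 while keeping
# their relative order; numpy's return_inverse gives a similar mapping:
import numpy as np
labels = np.array([-1, 4, 4, 7, -1, 9])
compressed = np.unique(labels, return_inverse=True)[1]    # -> [0 1 1 2 0 3]
# ----------------------------------------------------------------------------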
| mit |
theochem/horton | horton/grid/test/test_poisson.py | 4 | 6804 | # -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from scipy.special import erf
import numpy as np
from nose.plugins.attrib import attr
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
@attr('slow')
def test_solve_poisson_becke_n2():
mol = IOData.from_file(context.get_fn('test/n2_hfs_sto3g.fchk'))
lmaxmax = 4
# compute hartree potential on a molecular grid
molgrid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers, random_rotate=False, mode='keep')
dm_full = mol.get_dm_full()
reference = mol.obasis.compute_grid_hartree_dm(dm_full, molgrid.points)
# construct the same potential numerically with Becke's method
rho = mol.obasis.compute_grid_density_dm(dm_full, molgrid.points)
begin = 0
hds = []
for i in xrange(mol.natom):
atgrid = molgrid.subgrids[i]
end = begin + atgrid.size
becke_weights = molgrid.becke_weights[begin:end]
density_decomposition = atgrid.get_spherical_decomposition(rho[begin:end], becke_weights, lmax=lmaxmax)
hartree_decomposition = solve_poisson_becke(density_decomposition)
hds.append(hartree_decomposition)
begin = end
# Evaluate the splines obtained with Becke's method on the molecular grid
# Increasing angular momenta are used to check the convergence.
last_error = None
for lmax in xrange(0, lmaxmax+1):
result = molgrid.zeros()
for i in xrange(mol.natom):
molgrid.eval_decomposition(hds[i][:(lmax+1)**2], mol.coordinates[i], result)
potential_error = result - reference
error = molgrid.integrate(potential_error, potential_error)**0.5
if last_error is not None:
assert error < last_error
last_error = error
if False:
worst = molgrid.integrate(reference, reference)**0.5
print 'lmax=%i %12.4e %12.4e' % (lmax, error, worst)
for rho_low, rho_high in (0, 1e-8), (1e-8, 1e-4), (1e-4, 1e0), (1e0, 1e4), (1e4, 1e100):
mask = ((rho >= rho_low) & (rho < rho_high)).astype(float)
error = molgrid.integrate(potential_error, potential_error, mask)**0.5
worst = molgrid.integrate(reference, reference, mask)**0.5
print '%10.2e : %10.2e | %12.4e %12.4e' % (rho_low, rho_high, error, worst)
print
assert error < 6e-2
if False:
# Plot stuff
import matplotlib.pyplot as pt
linegrid = LineGrid(mol.coordinates[0], mol.coordinates[1], 500, 1)
rho = mol.obasis.compute_grid_density_dm(dm_full, linegrid.points)
reference = mol.obasis.compute_grid_hartree_dm(dm_full, linegrid.points)
for lmax in xrange(0, lmaxmax+1):
result = linegrid.zeros()
for i in xrange(mol.natom):
linegrid.eval_decomposition(hds[i][:(lmax+1)**2], mol.coordinates[i], result)
pt.clf()
#pt.plot(linegrid.x, reference)
#pt.plot(linegrid.x, result)
pt.plot(linegrid.x, (result - reference))
pt.ylim(-0.3, 0.3)
pt.savefig('test_poisson_%i.png' % lmax)
def test_solve_poisson_becke_sa():
sigma = 8.0
rtf = ExpRTransform(1e-4, 1e2, 500)
r = rtf.get_radii()
rhoy = np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5
rhod = np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5*(-r/sigma)/sigma
rho = CubicSpline(rhoy, rhod, rtf)
v = solve_poisson_becke([rho])[0]
s2s = np.sqrt(2)*sigma
soly = erf(r/s2s)/r
sold = np.exp(-(r/s2s)**2)*2/np.sqrt(np.pi)/s2s/r - erf(r/s2s)/r**2
if False:
import matplotlib.pyplot as pt
n = 10
pt.clf()
pt.plot(r[:n], soly[:n], label='exact')
pt.plot(r[:n], v.y[:n], label='spline')
pt.legend(loc=0)
pt.savefig('denu.png')
assert abs(v.y - soly).max()/abs(soly).max() < 1e-6
assert abs(v.dx - sold).max()/abs(sold).max() < 1e-4
# Test the boundary condition at zero and infinity
assert v.extrapolation.l == 0
np.testing.assert_allclose(v.extrapolation.amp_left, np.sqrt(2/np.pi)/sigma)
np.testing.assert_allclose(v.extrapolation.amp_right, 1.0)
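# --- illustrative check (not part of HORTON) ---------------------------------
# The analytic reference used above is the potential of a normalised spherical
# Gaussian charge, V(r) = erf(r/(sqrt(2)*sigma))/r, which tends to
# sqrt(2/pi)/sigma at the origin and to 1/r far away -- the two extrapolation
# amplitudes asserted just before this comment. A quick standalone check,
# reusing the erf and np imports from the top of this module:
_sigma = 8.0
_r = np.array([1e-6, 1e3])
_V = erf(_r/(np.sqrt(2)*_sigma))/_r
# _V[0] is approximately np.sqrt(2/np.pi)/_sigma and _V[1] approximately 1/_r[1]
# ----------------------------------------------------------------------------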
def test_solve_poisson_becke_gaussian_dipole():
sigma = 8.0
rtf = ExpRTransform(1e-4, 8e1, 200)
r = rtf.get_radii()
# By deriving a Gaussian charge distribution with respect to z, we get
# rho(\mathbf{r})=Y_1^0(\Omega) rhoy, with rhoy as given below
# Note that rhoy is simply the derivative of a Gaussian charge distribution
# with respect to r.
rhoy = -r/sigma**2*np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5
rhod = (-1.0+r**2/sigma**2)/sigma**2*np.exp(-0.5*(r/sigma)**2)/sigma**3/(2*np.pi)**1.5
rho = CubicSpline(rhoy, rhod, rtf)
v = solve_poisson_becke([rho]*4)[1] # Not interested in first spline, i.e. l=0
s2s = np.sqrt(2)*sigma
# The potential corresponding to Y_1^0(\Omega), can be found by deriving
# the potential of a Gaussian charge distribution with respect to r
soly = np.exp(-(r/s2s)**2)*2/np.sqrt(np.pi)/s2s/r - erf(r/s2s)/r**2
sold = 2.0*erf(r/s2s)/r**3 - 2*2/np.sqrt(np.pi)*np.exp(-(r/s2s)**2)/s2s/r**2 - 2*2/np.sqrt(np.pi)/s2s**3*np.exp(-(r/s2s)**2)
if False:
import matplotlib.pyplot as pt
n = 200
pt.clf()
pt.plot(r[:n], -soly[:n], label='exact', marker='*')
pt.plot(r[:n], -v.y[:n], label='spline', marker='*')
r2 = np.linspace(1e-5, 2e-4, 50)
pt.plot(r2, -v(r2), label='spline eval', marker='*')
pt.xscale('log')
pt.yscale('log')
pt.legend(loc=0)
pt.savefig('poisson_gdipole.png')
assert abs(v.y - soly).max()/abs(soly).max() < 1e-6
assert abs(v.dx - sold).max()/abs(sold).max() < 1e-4
# Test the boundary condition at zero and infinity
assert v.extrapolation.l == 1
np.testing.assert_allclose(v.extrapolation.amp_left, -2.0/3.0/np.sqrt(2*np.pi)/sigma**3)
np.testing.assert_allclose(v.extrapolation.amp_right, -1.0)
| gpl-3.0 |
camisatx/IntroToPython-Fall-2016 | Code/price_extractor/price_extractor_v1.py | 1 | 1862 | from datetime import datetime
import pandas as pd
def download_prices(ticker, start, end, interval='d'):
""" Download the historical price data from Yahoo Finance for the provided
ticker over the date range.
https://stackoverflow.com/questions/35815269/python-requests-text-to-pandas-dataframe
http://chart.finance.yahoo.com/table.csv?s=AAPL&a=00&b=1&c=2010&d=10&e=30&f=2016&g=d&ignore=.csv
:param ticker: String of ticker
:param start: Datetime object of the start date
:param end: Datetime object of the end date
:param interval: Optional string for the price history interval (d, w, m, v)
:return: DataFrame of the raw stock prices
"""
# Prepare the URL API component strings
url_root = 'http://chart.finance.yahoo.com/table.csv?'
url_ticker = 's=%s' % ticker
url_interval = 'g=%s' % interval
url_start = ('a=%s&b=%s&c=%s' % ((start.month - 1), start.day, start.year))
url_end = ('d=%s&e=%s&f=%s' % ((end.month - 1), end.day, end.year))
url_csv = 'ignore=.csv'
# Final URL string for this ticker over the date range
final_url = (url_root + url_ticker + '&' + url_start + '&' + url_end +
'&' + url_interval + '&' + url_csv)
# Let pandas download the CSV file and convert it into a DataFrame
raw_df = pd.read_csv(final_url, index_col=False)
# Move the Date column to the DataFrame index
raw_df.set_index(keys='Date', inplace=True)
return raw_df
if __name__ == '__main__':
test_ticker = 'AAPL'
test_interval = 'd'
test_start = datetime(2010, 1, 1)
test_end = datetime.now() # specified date or current date
price_df = download_prices(ticker=test_ticker, start=test_start,
end=test_end, interval=test_interval)
price_df.to_csv('%s_%s.csv' % (test_ticker, test_interval))
| agpl-3.0 |
gtrensch/nest-simulator | pynest/examples/hh_psc_alpha.py | 8 | 2122 | # -*- coding: utf-8 -*-
#
# hh_psc_alpha.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Example using Hodgkin-Huxley neuron
-----------------------------------
This example produces a rate-response (FI) curve of the Hodgkin-Huxley
neuron ``hh_psc_alpha`` in response to a range of different current (DC) stimulations.
The result is plotted using matplotlib.
Since a DC input affects only the neuron's channel dynamics, this routine
does not yet check correctness of synaptic response.
"""
import nest
import numpy as np
import matplotlib.pyplot as plt
nest.set_verbosity('M_WARNING')
nest.ResetKernel()
simtime = 1000
# Amplitude range, in pA
dcfrom = 0
dcstep = 20
dcto = 2000
h = 0.1 # simulation step size in ms
neuron = nest.Create('hh_psc_alpha')
sr = nest.Create('spike_recorder')
sr.record_to = 'memory'
nest.Connect(neuron, sr, syn_spec={'weight': 1.0, 'delay': h})
# Simulation loop
n_data = int(dcto / float(dcstep))
amplitudes = np.zeros(n_data)
event_freqs = np.zeros(n_data)
for i, amp in enumerate(range(dcfrom, dcto, dcstep)):
neuron.I_e = float(amp)
print(f"Simulating with current I={amp} pA")
nest.Simulate(1000) # one second warm-up time for equilibrium state
sr.n_events = 0 # then reset spike counts
nest.Simulate(simtime) # another simulation call to record firing rate
n_events = sr.n_events
amplitudes[i] = amp
event_freqs[i] = n_events / (simtime / 1000.)
plt.plot(amplitudes, event_freqs)
plt.show()
| gpl-2.0 |
julienr/vispy | vispy/color/colormap.py | 13 | 38233 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division # just to be safe...
import inspect
import numpy as np
from .color_array import ColorArray
from ..ext.six import string_types
from ..ext.cubehelix import cubehelix
from ..ext.husl import husl_to_rgb
###############################################################################
# Color maps
# Utility functions for interpolation in NumPy.
def _vector_or_scalar(x, type='row'):
"""Convert an object to either a scalar or a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x)
if isinstance(x, np.ndarray):
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
def _vector(x, type='row'):
"""Convert an object to a row or column vector."""
if isinstance(x, (list, tuple)):
x = np.array(x, dtype=np.float32)
elif not isinstance(x, np.ndarray):
x = np.array([x], dtype=np.float32)
assert x.ndim == 1
if type == 'column':
x = x[:, None]
return x
def _find_controls(x, controls=None, clip=None):
x_controls = np.clip(np.searchsorted(controls, x) - 1, 0, clip)
return x_controls.astype(np.int32)
# Normalization
def _normalize(x, cmin=None, cmax=None, clip=True):
"""Normalize an array from the range [cmin, cmax] to [0,1],
with optional clipping."""
if not isinstance(x, np.ndarray):
x = np.array(x)
if cmin is None:
cmin = x.min()
if cmax is None:
cmax = x.max()
if cmin == cmax:
return .5 * np.ones(x.shape)
else:
cmin, cmax = float(cmin), float(cmax)
y = (x - cmin) * 1. / (cmax - cmin)
if clip:
y = np.clip(y, 0., 1.)
return y
# Interpolation functions in NumPy.
def _mix_simple(a, b, x):
"""Mix b (with proportion x) with a."""
x = np.clip(x, 0.0, 1.0)
return (1.0 - x)*a + x*b
def _interpolate_multi(colors, x, controls):
x = x.ravel()
n = len(colors)
# For each element in x, the control index of its bin's left boundary.
x_step = _find_controls(x, controls, n-2)
# The length of each bin.
controls_length = np.diff(controls).astype(np.float32)
# Prevent division by zero error.
controls_length[controls_length == 0.] = 1.
# Like x, but relative to each bin.
_to_clip = x - controls[x_step]
_to_clip /= controls_length[x_step]
x_rel = np.clip(_to_clip, 0., 1.)
return (colors[x_step],
colors[x_step + 1],
x_rel[:, None])
def mix(colors, x, controls=None):
a, b, x_rel = _interpolate_multi(colors, x, controls)
return _mix_simple(a, b, x_rel)
def smoothstep(edge0, edge1, x):
""" performs smooth Hermite interpolation
between 0 and 1 when edge0 < x < edge1. """
# Scale, bias and saturate x to 0..1 range
x = np.clip((x - edge0)/(edge1 - edge0), 0.0, 1.0)
# Evaluate polynomial
return x*x*(3 - 2*x)
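# --- illustrative check (not part of vispy) ----------------------------------
# Worked example of the Hermite polynomial above: the rescaled midpoint gives
# x = 0.5, so x*x*(3 - 2*x) = 0.25 * 2.0 = 0.5, and the curve leaves both
# edges with zero slope. The assignment below only illustrates the expected
# values and is not used elsewhere in the module:
_smoothstep_demo = smoothstep(0.0, 1.0, np.array([0.0, 0.25, 0.5, 1.0]))
# -> array([0.0, 0.15625, 0.5, 1.0])
# ----------------------------------------------------------------------------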
def step(colors, x, controls=None):
"""Step interpolation from a set of colors. x belongs in [0, 1]."""
x = x.ravel()
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(colors)
assert ncolors == len(controls) - 1
assert ncolors >= 2
x_step = _find_controls(x, controls, ncolors-1)
return colors[x_step, ...]
# GLSL interpolation functions.
def _glsl_mix(controls=None):
"""Generate a GLSL template function from a given interpolation patterns
and control points."""
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(controls)
assert ncolors >= 2
if ncolors == 2:
s = " return mix($color_0, $color_1, t);\n"
else:
s = ""
for i in range(ncolors-1):
if i == 0:
ifs = 'if (t < %.6f)' % (controls[i+1])
elif i == (ncolors-2):
ifs = 'else'
else:
ifs = 'else if (t < %.6f)' % (controls[i+1])
adj_t = '(t - %s) / %s' % (controls[i],
controls[i+1] - controls[i])
s += ("%s {\n return mix($color_%d, $color_%d, %s);\n} " %
(ifs, i, i+1, adj_t))
return "vec4 colormap(float t) {\n%s\n}" % s
def _glsl_step(controls=None):
assert (controls[0], controls[-1]) == (0., 1.)
ncolors = len(controls) - 1
assert ncolors >= 2
s = ""
for i in range(ncolors-1):
if i == 0:
ifs = 'if (t < %.6f)' % (controls[i+1])
elif i == (ncolors-2):
ifs = 'else'
else:
ifs = 'else if (t < %.6f)' % (controls[i+1])
s += """%s {\n return $color_%d;\n} """ % (ifs, i)
return """vec4 colormap(float t) {\n%s\n}""" % s
# Mini GLSL template system for colors.
def _process_glsl_template(template, colors):
"""Replace $color_i by color #i in the GLSL template."""
for i in range(len(colors) - 1, -1, -1):
color = colors[i]
assert len(color) == 4
vec4_color = 'vec4(%.3f, %.3f, %.3f, %.3f)' % tuple(color)
template = template.replace('$color_%d' % i, vec4_color)
return template
class BaseColormap(object):
"""Class representing a colormap:
t \in [0, 1] --> rgba_color
Parameters
----------
colors : list of lists, tuples, or ndarrays
The control colors used by the colormap (shape = (ncolors, 4)).
Notes
-----
Must be overridden. Child classes need to implement:
glsl_map : string
The GLSL function for the colormap. Use $color_0 to refer
to the first color in `colors`, and so on. These are vec4 vectors.
map(item) : function
Takes a (N, 1) vector of values in [0, 1], and returns a rgba array
of size (N, 4).
"""
# Control colors used by the colormap.
colors = None
# GLSL string with a function implementing the color map.
glsl_map = None
def __init__(self, colors=None):
# Ensure the colors are arrays.
if colors is not None:
self.colors = colors
if not isinstance(self.colors, ColorArray):
self.colors = ColorArray(self.colors)
# Process the GLSL map function by replacing $color_i by the
if len(self.colors) > 0:
self.glsl_map = _process_glsl_template(self.glsl_map,
self.colors.rgba)
def map(self, item):
"""Return a rgba array for the requested items.
This function must be overridden by child classes.
This function doesn't need to implement argument checking on `item`.
It can always assume that `item` is a (N, 1) array of values between
0 and 1.
Parameters
----------
item : ndarray
An array of values in [0,1].
Returns
-------
rgba : ndarray
An array with rgba values, with one color per item. The shape
should be ``item.shape + (4,)``.
Notes
-----
Users are expected to use a colormap with ``__getitem__()`` rather
than ``map()`` (which implements a lower-level API).
"""
raise NotImplementedError()
def __getitem__(self, item):
if isinstance(item, tuple):
raise ValueError('ColorArray indexing is only allowed along '
'the first dimension.')
# Ensure item is either a scalar or a column vector.
item = _vector(item, type='column')
# Clip the values in [0, 1].
item = np.clip(item, 0., 1.)
colors = self.map(item)
return ColorArray(colors)
def __setitem__(self, item, value):
raise RuntimeError("It is not possible to set items to "
"BaseColormap instances.")
def _repr_html_(self):
n = 100
html = ("""
<style>
table.vispy_colormap {
height: 30px;
border: 0;
margin: 0;
padding: 0;
}
table.vispy_colormap td {
width: 3px;
border: 0;
margin: 0;
padding: 0;
}
</style>
<table class="vispy_colormap">
""" +
'\n'.join([(("""<td style="background-color: %s;"
title="%s"></td>""") % (color, color))
for color in self[np.linspace(0., 1., n)].hex]) +
"""
</table>
""")
return html
def _default_controls(ncolors):
"""Generate linearly spaced control points from a set of colors."""
return np.linspace(0., 1., ncolors)
# List the parameters of every supported interpolation mode.
_interpolation_info = {
'linear': {
'ncontrols': lambda ncolors: ncolors, # take ncolors as argument
'glsl_map': _glsl_mix, # take 'controls' as argument
'map': mix,
},
'zero': {
'ncontrols': lambda ncolors: (ncolors+1),
'glsl_map': _glsl_step,
'map': step,
}
}
class Colormap(BaseColormap):
"""A colormap defining several control colors and an interpolation scheme.
Parameters
----------
colors : list of colors | ColorArray
The list of control colors. If not a ``ColorArray``, a new
``ColorArray`` instance is created from this list. See the
documentation of ``ColorArray``.
controls : array-like
The list of control points for the given colors. It should be
an increasing list of floating-point numbers between 0.0 and 1.0.
The first control point must be 0.0. The last control point must be
1.0. The number of control points depends on the interpolation scheme.
interpolation : str
The interpolation mode of the colormap. Default: 'linear'. Can also
be 'zero'.
If 'linear', ncontrols = ncolors (one color per control point).
If 'zero', ncontrols = ncolors+1 (one color per bin).
Examples
--------
Here is a basic example:
>>> from vispy.color import Colormap
>>> cm = Colormap(['r', 'g', 'b'])
>>> cm[0.], cm[0.5], cm[np.linspace(0., 1., 100)]
"""
def __init__(self, colors, controls=None, interpolation='linear'):
self.interpolation = interpolation
ncontrols = self._ncontrols(len(colors))
# Default controls.
if controls is None:
controls = _default_controls(ncontrols)
assert len(controls) == ncontrols
self._controls = np.array(controls, dtype=np.float32)
self.glsl_map = self._glsl_map_generator(self._controls)
super(Colormap, self).__init__(colors)
@property
def interpolation(self):
"""The interpolation mode of the colormap"""
return self._interpolation
@interpolation.setter
def interpolation(self, val):
if val not in _interpolation_info:
raise ValueError('The interpolation mode can only be one of: ' +
', '.join(sorted(_interpolation_info.keys())))
# Get the information of the interpolation mode.
info = _interpolation_info[val]
# Get the function that generates the GLSL map, as a function of the
# controls array.
self._glsl_map_generator = info['glsl_map']
# Number of controls as a function of the number of colors.
self._ncontrols = info['ncontrols']
# Python map function.
self._map_function = info['map']
self._interpolation = val
def map(self, x):
"""The Python mapping function from the [0,1] interval to a
list of rgba colors
Parameters
----------
x : array-like
The values to map.
Returns
-------
colors : list
List of rgba colors.
"""
return self._map_function(self.colors.rgba, x, self._controls)
class CubeHelixColormap(Colormap):
def __init__(self, start=0.5, rot=1, gamma=1.0, reverse=True, nlev=32,
minSat=1.2, maxSat=1.2, minLight=0., maxLight=1., **kwargs):
"""Cube helix colormap
A full implementation of Dave Green's "cubehelix" for Matplotlib.
Based on the FORTRAN 77 code provided in
D.A. Green, 2011, BASI, 39, 289.
http://adsabs.harvard.edu/abs/2011arXiv1108.5083G
User can adjust all parameters of the cubehelix algorithm.
This enables much greater flexibility in choosing color maps, while
always ensuring the color map scales in intensity from black
to white. A few simple examples:
Default color map settings produce the standard "cubehelix".
Create color map in only blues by setting rot=0 and start=0.
Create reverse (white to black) backwards through the rainbow once
by setting rot=1 and reverse=True.
Parameters
----------
start : scalar, optional
Sets the starting position in the color space. 0=blue, 1=red,
2=green. Defaults to 0.5.
rot : scalar, optional
The number of rotations through the rainbow. Can be positive
or negative, indicating direction of rainbow. Negative values
correspond to Blue->Red direction. Defaults to 1
gamma : scalar, optional
The gamma correction for intensity. Defaults to 1.0
reverse : boolean, optional
Set to True to reverse the color map. Will go from black to
white. Good for density plots where shade~density. Defaults to
True
nlev : scalar, optional
Defines the number of discrete levels to render colors at.
Defaults to 32.
sat : scalar, optional
The saturation intensity factor. Defaults to 1.2
NOTE: this was formerly known as "hue" parameter
minSat : scalar, optional
Sets the minimum-level saturation. Defaults to 1.2
maxSat : scalar, optional
Sets the maximum-level saturation. Defaults to 1.2
startHue : scalar, optional
Sets the starting color, ranging from [0, 360], as in
D3 version by @mbostock
NOTE: overrides values in start parameter
endHue : scalar, optional
Sets the ending color, ranging from [0, 360], as in
D3 version by @mbostock
NOTE: overrides values in rot parameter
minLight : scalar, optional
Sets the minimum lightness value. Defaults to 0.
maxLight : scalar, optional
Sets the maximum lightness value. Defaults to 1.
"""
super(CubeHelixColormap, self).__init__(
cubehelix(start=start, rot=rot, gamma=gamma, reverse=reverse,
nlev=nlev, minSat=minSat, maxSat=maxSat,
minLight=minLight, maxLight=maxLight, **kwargs))
class _Fire(BaseColormap):
colors = [(1.0, 1.0, 1.0, 1.0),
(1.0, 1.0, 0.0, 1.0),
(1.0, 0.0, 0.0, 1.0)]
glsl_map = """
vec4 fire(float t) {
return mix(mix($color_0, $color_1, t),
mix($color_1, $color_2, t*t), t);
}
"""
def map(self, t):
a, b, d = self.colors.rgba
c = _mix_simple(a, b, t)
e = _mix_simple(b, d, t**2)
return _mix_simple(c, e, t)
class _Grays(BaseColormap):
glsl_map = """
vec4 grays(float t) {
return vec4(t, t, t, 1.0);
}
"""
def map(self, t):
if isinstance(t, np.ndarray):
return np.hstack([t, t, t, np.ones(t.shape)]).astype(np.float32)
else:
return np.array([t, t, t, 1.0], dtype=np.float32)
class _Ice(BaseColormap):
glsl_map = """
vec4 ice(float t) {
return vec4(t, t, 1.0, 1.0);
}
"""
def map(self, t):
if isinstance(t, np.ndarray):
return np.hstack([t, t, np.ones(t.shape),
np.ones(t.shape)]).astype(np.float32)
else:
return np.array([t, t, 1.0, 1.0], dtype=np.float32)
class _Hot(BaseColormap):
colors = [(0., .33, .66, 1.0),
(.33, .66, 1., 1.0)]
glsl_map = """
vec4 hot(float t) {
return vec4(smoothstep($color_0.rgb, $color_1.rgb, vec3(t, t, t)),
1.0);
}
"""
def map(self, t):
rgba = self.colors.rgba
smoothed = smoothstep(rgba[0, :3], rgba[1, :3], t)
return np.hstack((smoothed, np.ones((len(t), 1))))
class _Winter(BaseColormap):
colors = [(0.0, 0.0, 1.0, 1.0),
(0.0, 1.0, 0.5, 1.0)]
glsl_map = """
vec4 winter(float t) {
return mix($color_0, $color_1, sqrt(t));
}
"""
def map(self, t):
return _mix_simple(self.colors.rgba[0],
self.colors.rgba[1],
np.sqrt(t))
class _SingleHue(Colormap):
"""A colormap which is solely defined by the given hue and value.
Given the color hue and value, this color map increases the saturation
of a color. The start color is almost white but still contains a hint of
the given color, and at the end the color is fully saturated.
Parameters
----------
hue : scalar, optional
The hue refers to a "true" color, without any shading or tinting.
Must be in the range [0, 360]. Defaults to 200 (blue).
saturation_range : array-like, optional
The saturation represents how "pure" a color is. Less saturation means
more white light mixed in the color. A fully saturated color means
the pure color defined by the hue. No saturation means completely
white. This colormap changes the saturation, and with this parameter
you can specify the lower and upper bound. Default is [0.1, 0.8].
value : scalar, optional
The value defines the "brightness" of a color: a value of 0.0 means
completely black while a value of 1.0 means the color defined by the
hue without shading. Must be in the range [0, 1.0]. The default value
is 1.0.
Notes
-----
For more information about the hue values see the `wikipedia page`_.
.. _wikipedia page: https://en.wikipedia.org/wiki/Hue
"""
def __init__(self, hue=200, saturation_range=[0.1, 0.8], value=1.0):
colors = ColorArray([
(hue, saturation_range[0], value),
(hue, saturation_range[1], value)
], color_space='hsv')
super(_SingleHue, self).__init__(colors)
class _HSL(Colormap):
"""A colormap which is defined by n evenly spaced points in
a circular color space.
This means that we change the hue value while keeping the
saturation and value constant.
Parameters
----------
ncolors : int, optional
The number of colors to generate.
hue_start : int, optional
The hue start value. Must be in the range [0, 360], the default is 0.
saturation : float, optional
The saturation component of the colors to generate. The default is
fully saturated (1.0). Must be in the range [0, 1.0].
value : float, optional
The value (brightness) component of the colors to generate. Must
be in the range [0, 1.0], and the default is 1.0
controls : array-like, optional
The list of control points for the colors to generate. It should be
an increasing list of floating-point number between 0.0 and 1.0.
The first control point must be 0.0. The last control point must be
1.0. The number of control points depends on the interpolation scheme.
interpolation : str, optional
The interpolation mode of the colormap. Default: 'linear'. Can also
be 'zero'.
If 'linear', ncontrols = ncolors (one color per control point).
If 'zero', ncontrols = ncolors+1 (one color per bin).
"""
def __init__(self, ncolors=6, hue_start=0, saturation=1.0, value=1.0,
controls=None, interpolation='linear'):
hues = np.linspace(0, 360, ncolors + 1)[:-1]
hues += hue_start
hues %= 360
colors = ColorArray([(hue, saturation, value) for hue in hues],
color_space='hsv')
super(_HSL, self).__init__(colors, controls=controls,
interpolation=interpolation)
class _HUSL(Colormap):
"""A colormap which is defined by n evenly spaced points in
the HUSL hue space.
Parameters
----------
ncolors : int, optional
The number of colors to generate.
hue_start : int, optional
The hue start value. Must be in the range [0, 360], the default is 0.
saturation : float, optional
The saturation component of the colors to generate. The default is
fully saturated (1.0). Must be in the range [0, 1.0].
value : float, optional
The value component of the colors to generate or "brightness". Must
be in the range [0, 1.0], and the default is 0.7.
controls : array-like, optional
The list of control points for the colors to generate. It should be
an increasing list of floating-point numbers between 0.0 and 1.0.
The first control point must be 0.0. The last control point must be
1.0. The number of control points depends on the interpolation scheme.
interpolation : str, optional
The interpolation mode of the colormap. Default: 'linear'. Can also
be 'zero'.
If 'linear', ncontrols = ncolors (one color per control point).
If 'zero', ncontrols = ncolors+1 (one color per bin).
Notes
-----
For more information about HUSL colors see http://husl-colors.org
"""
def __init__(self, ncolors=6, hue_start=0, saturation=1.0, value=0.7,
controls=None, interpolation='linear'):
hues = np.linspace(0, 360, ncolors + 1)[:-1]
hues += hue_start
hues %= 360
saturation *= 99
value *= 99
colors = ColorArray(
[husl_to_rgb(hue, saturation, value) for hue in hues],
)
super(_HUSL, self).__init__(colors, controls=controls,
interpolation=interpolation)
class _Diverging(Colormap):
def __init__(self, h_pos=20, h_neg=250, saturation=1.0, value=0.7,
center="light"):
saturation *= 99
value *= 99
start = husl_to_rgb(h_neg, saturation, value)
mid = ((0.133, 0.133, 0.133) if center == "dark" else
(0.92, 0.92, 0.92))
end = husl_to_rgb(h_pos, saturation, value)
colors = ColorArray([start, mid, end])
super(_Diverging, self).__init__(colors)
# https://github.com/matplotlib/matplotlib/pull/4707/files#diff-893cf0348279e9f4570488a7a297ab1eR774
# Taken from original Viridis colormap data in matplotlib implementation
# Sampled 128 points from the raw data-set of 256 samples.
# Sub sampled to 128 points since 256 points causes VisPy to freeze.
# HACK: Ideally, all 256 points should be included, with VisPy generating
# a 1D texture lookup for ColorMap, rather than branching code.
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
_colormaps = dict(
# Some colormap presets
autumn=Colormap([(1., 0., 0., 1.), (1., 1., 0., 1.)]),
blues=Colormap([(1., 1., 1., 1.), (0., 0., 1., 1.)]),
cool=Colormap([(0., 1., 1., 1.), (1., 0., 1., 1.)]),
greens=Colormap([(1., 1., 1., 1.), (0., 1., 0., 1.)]),
reds=Colormap([(1., 1., 1., 1.), (1., 0., 0., 1.)]),
spring=Colormap([(1., 0., 1., 1.), (1., 1., 0., 1.)]),
summer=Colormap([(0., .5, .4, 1.), (1., 1., .4, 1.)]),
fire=_Fire(),
grays=_Grays(),
hot=_Hot(),
ice=_Ice(),
winter=_Winter(),
light_blues=_SingleHue(),
orange=_SingleHue(hue=35),
viridis=Colormap(ColorArray(_viridis_data[::2])),
# Diverging presets
coolwarm=Colormap(ColorArray(
[
(226, 0.59, 0.92), (222, 0.44, 0.99), (218, 0.26, 0.97),
(30, 0.01, 0.87),
(20, 0.3, 0.96), (15, 0.5, 0.95), (8, 0.66, 0.86)
],
color_space="hsv"
)),
PuGr=_Diverging(145, 280, 0.85, 0.30),
GrBu=_Diverging(255, 133, 0.75, 0.6),
GrBu_d=_Diverging(255, 133, 0.75, 0.6, "dark"),
RdBu=_Diverging(220, 20, 0.75, 0.5),
# Configurable colormaps
cubehelix=CubeHelixColormap,
single_hue=_SingleHue,
hsl=_HSL,
husl=_HUSL,
diverging=_Diverging
)
def get_colormap(name, *args, **kwargs):
"""Obtain a colormap
Some colormaps can have additional configuration parameters. Refer to
their corresponding documentation for more information.
Parameters
----------
name : str | Colormap
Colormap name. Can also be a Colormap for pass-through.
Examples
--------
>>> get_colormap('autumn')
>>> get_colormap('single_hue', hue=10)
"""
if isinstance(name, BaseColormap):
cmap = name
else:
if not isinstance(name, string_types):
raise TypeError('colormap must be a Colormap or string name')
if name not in _colormaps:
raise KeyError('colormap name %s not found' % name)
cmap = _colormaps[name]
if inspect.isclass(cmap):
cmap = cmap(*args, **kwargs)
return cmap
def get_colormaps():
"""Return the list of colormap names."""
return _colormaps.copy()
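# Illustrative usage sketch (the import path is an assumption; in a vispy
# checkout this module is typically importable as vispy.color): presets such
# as 'viridis' come back as ready-made Colormap instances, while configurable
# entries such as 'hsl', 'husl', 'single_hue', 'cubehelix' and 'diverging' are
# classes that get_colormap instantiates with the arguments you pass.
#
#     cmap = get_colormap('viridis')             # preset instance
#     custom = get_colormap('husl', ncolors=8)   # class, instantiated on demand
#     names = sorted(get_colormaps())            # all registered colormap names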
| bsd-3-clause |
danking/hail | hail/python/hail/methods/statgen.py | 1 | 151412 | import itertools
import math
import numpy as np
from typing import Dict, Callable
import builtins
import hail
import hail as hl
import hail.expr.aggregators as agg
from hail.expr import (Expression, ExpressionException, expr_float64, expr_call,
expr_any, expr_numeric, expr_locus, analyze, check_entry_indexed,
check_row_indexed, matrix_table_source, table_source)
from hail.expr.types import tbool, tarray, tfloat64, tint32
from hail import ir
from hail.genetics.reference_genome import reference_genome_type
from hail.linalg import BlockMatrix
from hail.matrixtable import MatrixTable
from hail.methods.misc import require_biallelic, require_row_key_variant
from hail.stats import LinearMixedModel
from hail.table import Table
from hail.typecheck import (typecheck, nullable, numeric, oneof, sequenceof,
enumeration, anytype)
from hail.utils import wrap_to_list, new_temp_file, FatalError
from hail.utils.java import Env, info, warning
from . import relatedness
from . import pca
from ..backend.spark_backend import SparkBackend
pc_relate = relatedness.pc_relate
identity_by_descent = relatedness.identity_by_descent
_blanczos_pca = pca._blanczos_pca
_hwe_normalized_blanczos = pca._hwe_normalized_blanczos
hwe_normalized_pca = pca.hwe_normalized_pca
pca = pca.pca
@typecheck(call=expr_call,
aaf_threshold=numeric,
include_par=bool,
female_threshold=numeric,
male_threshold=numeric,
aaf=nullable(str))
def impute_sex(call, aaf_threshold=0.0, include_par=False, female_threshold=0.2, male_threshold=0.8, aaf=None) -> Table:
r"""Impute sex of samples by calculating inbreeding coefficient on the
X chromosome.
.. include:: ../_templates/req_tvariant.rst
.. include:: ../_templates/req_biallelic.rst
Examples
--------
Remove samples where imputed sex does not equal reported sex:
>>> imputed_sex = hl.impute_sex(dataset.GT)
>>> dataset_result = dataset.filter_cols(imputed_sex[dataset.s].is_female != dataset.pheno.is_female,
... keep=False)
Notes
-----
We have used the same implementation as `PLINK v1.7
<https://zzz.bwh.harvard.edu/plink/summary.shtml#sexcheck>`__.
    Let `gr` be the reference genome of the type of the `locus` key (as
given by :attr:`.tlocus.reference_genome`)
1. Filter the dataset to loci on the X contig defined by `gr`.
2. Calculate alternate allele frequency (AAF) for each row from the dataset.
3. Filter to variants with AAF above `aaf_threshold`.
4. Remove loci in the pseudoautosomal region, as defined by `gr`, unless
`include_par` is ``True`` (it defaults to ``False``)
5. For each row and column with a non-missing genotype call, :math:`E`, the
expected number of homozygotes (from population AAF), is computed as
:math:`1.0 - (2.0*\mathrm{maf}*(1.0-\mathrm{maf}))`.
6. For each row and column with a non-missing genotype call, :math:`O`, the
observed number of homozygotes, is computed interpreting ``0`` as
       heterozygote and ``1`` as homozygote.
7. For each row and column with a non-missing genotype call, :math:`N` is
incremented by 1
8. For each column, :math:`E`, :math:`O`, and :math:`N` are combined across
variants
9. For each column, :math:`F` is calculated by :math:`(O - E) / (N - E)`
10. A sex is assigned to each sample with the following criteria:
- Female when ``F < 0.2``
- Male when ``F > 0.8``
Use `female_threshold` and `male_threshold` to change this behavior.
**Annotations**
The returned column-key indexed :class:`.Table` has the following fields in
addition to the matrix table's column keys:
- **is_female** (:py:data:`.tbool`) -- True if the imputed sex is female,
false if male, missing if undetermined.
- **f_stat** (:py:data:`.tfloat64`) -- Inbreeding coefficient.
- **n_called** (:py:data:`.tint64`) -- Number of variants with a genotype call.
- **expected_homs** (:py:data:`.tfloat64`) -- Expected number of homozygotes.
- **observed_homs** (:py:data:`.tint64`) -- Observed number of homozygotes.
    Parameters
    ----------
    call : :class:`.CallExpression`
A genotype call for each row and column. The source dataset's row keys
must be [[locus], alleles] with types :class:`.tlocus` and
:class:`.tarray` of :obj:`.tstr`. Moreover, the alleles array must have
exactly two elements (i.e. the variant must be biallelic).
aaf_threshold : :obj:`float`
Minimum alternate allele frequency threshold.
include_par : :obj:`bool`
Include pseudoautosomal regions.
female_threshold : :obj:`float`
Samples are called females if F < female_threshold.
male_threshold : :obj:`float`
Samples are called males if F > male_threshold.
aaf : :class:`str` or :obj:`None`
A field defining the alternate allele frequency for each row. If
``None``, AAF will be computed from `call`.
    Returns
    -------
:class:`.Table`
Sex imputation statistics per sample.
"""
if aaf_threshold < 0.0 or aaf_threshold > 1.0:
raise FatalError("Invalid argument for `aaf_threshold`. Must be in range [0, 1].")
mt = call._indices.source
mt, _ = mt._process_joins(call)
mt = mt.annotate_entries(call=call)
mt = require_biallelic(mt, 'impute_sex')
if (aaf is None):
mt = mt.annotate_rows(aaf=agg.call_stats(mt.call, mt.alleles).AF[1])
aaf = 'aaf'
rg = mt.locus.dtype.reference_genome
mt = hl.filter_intervals(mt,
hl.map(lambda x_contig: hl.parse_locus_interval(x_contig, rg), rg.x_contigs),
keep=True)
if not include_par:
interval_type = hl.tarray(hl.tinterval(hl.tlocus(rg)))
mt = hl.filter_intervals(mt,
hl.literal(rg.par, interval_type),
keep=False)
mt = mt.filter_rows((mt[aaf] > aaf_threshold) & (mt[aaf] < (1 - aaf_threshold)))
mt = mt.annotate_cols(ib=agg.inbreeding(mt.call, mt[aaf]))
kt = mt.select_cols(
is_female=hl.if_else(mt.ib.f_stat < female_threshold,
True,
hl.if_else(mt.ib.f_stat > male_threshold,
False,
hl.missing(tbool))),
**mt.ib).cols()
return kt
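# A small NumPy sketch (illustrative only, not Hail's implementation) of the
# per-sample inbreeding coefficient F = (O - E) / (N - E) described in the
# impute_sex notes above, assuming X-chromosome genotypes coded as the number
# of alternate alleles (0/1/2) with no missing calls, and a per-variant
# alternate allele frequency.
def _example_inbreeding_f(gt, aaf):
    gt = np.asarray(gt, dtype=float)
    aaf = np.asarray(aaf, dtype=float)
    expected_homs = np.sum(1.0 - 2.0 * aaf * (1.0 - aaf))  # E, from population AAF
    observed_homs = np.sum(gt != 1)                        # O, calls with 0 or 2 alt alleles
    n_called = gt.size                                     # N
    return (observed_homs - expected_homs) / (n_called - expected_homs)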
def _get_regression_row_fields(mt, pass_through, method) -> Dict[str, str]:
row_fields = dict(zip(mt.row_key.keys(), mt.row_key.keys()))
for f in pass_through:
if isinstance(f, str):
if f not in mt.row:
raise ValueError(f"'{method}/pass_through': MatrixTable has no row field {repr(f)}")
if f in row_fields:
# allow silent pass through of key fields
if f in mt.row_key:
pass
else:
raise ValueError(f"'{method}/pass_through': found duplicated field {repr(f)}")
row_fields[f] = mt[f]
else:
assert isinstance(f, Expression)
if not f._ir.is_nested_field:
raise ValueError(f"'{method}/pass_through': expect fields or nested fields, not complex expressions")
if not f._indices == mt._row_indices:
raise ExpressionException(f"'{method}/pass_through': require row-indexed fields, found indices {f._indices.axes}")
name = f._ir.name
if name in row_fields:
# allow silent pass through of key fields
if not (name in mt.row_key and f._ir == mt[name]._ir):
raise ValueError(f"'{method}/pass_through': found duplicated field {repr(name)}")
row_fields[name] = f
for k in mt.row_key:
del row_fields[k]
return row_fields
@typecheck(y=oneof(expr_float64, sequenceof(expr_float64), sequenceof(sequenceof(expr_float64))),
x=expr_float64,
covariates=sequenceof(expr_float64),
block_size=int,
pass_through=sequenceof(oneof(str, Expression)))
def linear_regression_rows(y, x, covariates, block_size=16, pass_through=()) -> hail.Table:
r"""For each row, test an input variable for association with
response variables using linear regression.
Examples
--------
>>> result_ht = hl.linear_regression_rows(
... y=dataset.pheno.height,
... x=dataset.GT.n_alt_alleles(),
... covariates=[1, dataset.pheno.age, dataset.pheno.is_female])
Warning
-------
As in the example, the intercept covariate ``1`` must be
included **explicitly** if desired.
Warning
-------
If `y` is a single value or a list, :func:`.linear_regression_rows`
considers the same set of columns (i.e., samples, points) for every response
variable and row, namely those columns for which **all** response variables
and covariates are defined.
If `y` is a list of lists, then each inner list is treated as an
independent group, subsetting columns for missingness separately.
Notes
-----
With the default root and `y` a single expression, the following row-indexed
fields are added.
- **<row key fields>** (Any) -- Row key fields.
- **<pass_through fields>** (Any) -- Row fields in `pass_through`.
- **n** (:py:data:`.tint32`) -- Number of columns used.
- **sum_x** (:py:data:`.tfloat64`) -- Sum of input values `x`.
- **y_transpose_x** (:py:data:`.tfloat64`) -- Dot product of response
vector `y` with the input vector `x`.
- **beta** (:py:data:`.tfloat64`) --
Fit effect coefficient of `x`, :math:`\hat\beta_1` below.
- **standard_error** (:py:data:`.tfloat64`) --
Estimated standard error, :math:`\widehat{\mathrm{se}}_1`.
- **t_stat** (:py:data:`.tfloat64`) -- :math:`t`-statistic, equal to
:math:`\hat\beta_1 / \widehat{\mathrm{se}}_1`.
- **p_value** (:py:data:`.tfloat64`) -- :math:`p`-value.
If `y` is a list of expressions, then the last five fields instead have type
:class:`.tarray` of :py:data:`.tfloat64`, with corresponding indexing of
the list and each array.
If `y` is a list of lists of expressions, then `n` and `sum_x` are of type
``array<float64>``, and the last five fields are of type
``array<array<float64>>``. Index into these arrays with
``a[index_in_outer_list, index_in_inner_list]``. For example, if
``y=[[a], [b, c]]`` then the p-value for ``b`` is ``p_value[1][0]``.
In the statistical genetics example above, the input variable `x` encodes
genotype as the number of alternate alleles (0, 1, or 2). For each variant
(row), genotype is tested for association with height controlling for age
and sex, by fitting the linear regression model:
.. math::
\mathrm{height} = \beta_0 + \beta_1 \, \mathrm{genotype}
+ \beta_2 \, \mathrm{age}
+ \beta_3 \, \mathrm{is\_female}
+ \varepsilon,
\quad
\varepsilon \sim \mathrm{N}(0, \sigma^2)
Boolean covariates like :math:`\mathrm{is\_female}` are encoded as 1 for
``True`` and 0 for ``False``. The null model sets :math:`\beta_1 = 0`.
The standard least-squares linear regression model is derived in Section
3.2 of `The Elements of Statistical Learning, 2nd Edition
<http://statweb.stanford.edu/~tibs/ElemStatLearn/printings/ESLII_print10.pdf>`__.
See equation 3.12 for the t-statistic which follows the t-distribution with
:math:`n - k - 1` degrees of freedom, under the null hypothesis of no
effect, with :math:`n` samples and :math:`k` covariates in addition to
``x``.
Note
----
Use the `pass_through` parameter to include additional row fields from
matrix table underlying ``x``. For example, to include an "rsid" field, set
``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.
Parameters
----------
y : :class:`.Float64Expression` or :obj:`list` of :class:`.Float64Expression`
One or more column-indexed response expressions.
x : :class:`.Float64Expression`
Entry-indexed expression for input variable.
covariates : :obj:`list` of :class:`.Float64Expression`
List of column-indexed covariate expressions.
block_size : :obj:`int`
Number of row regressions to perform simultaneously per core. Larger blocks
require more memory but may improve performance.
pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
Additional row fields to include in the resulting table.
Returns
-------
:class:`.Table`
"""
if not isinstance(Env.backend(), SparkBackend):
return _linear_regression_rows_nd(y, x, covariates, block_size, pass_through)
mt = matrix_table_source('linear_regression_rows/x', x)
check_entry_indexed('linear_regression_rows/x', x)
y_is_list = isinstance(y, list)
if y_is_list and len(y) == 0:
raise ValueError("'linear_regression_rows': found no values for 'y'")
is_chained = y_is_list and isinstance(y[0], list)
if is_chained and any(len(lst) == 0 for lst in y):
raise ValueError("'linear_regression_rows': found empty inner list for 'y'")
y = wrap_to_list(y)
for e in (itertools.chain.from_iterable(y) if is_chained else y):
analyze('linear_regression_rows/y', e, mt._col_indices)
for e in covariates:
analyze('linear_regression_rows/covariates', e, mt._col_indices)
_warn_if_no_intercept('linear_regression_rows', covariates)
x_field_name = Env.get_uid()
if is_chained:
y_field_names = [[f'__y_{i}_{j}' for j in range(len(y[i]))] for i in range(len(y))]
y_dict = dict(zip(itertools.chain.from_iterable(y_field_names), itertools.chain.from_iterable(y)))
func = 'LinearRegressionRowsChained'
else:
y_field_names = list(f'__y_{i}' for i in range(len(y)))
y_dict = dict(zip(y_field_names, y))
func = 'LinearRegressionRowsSingle'
cov_field_names = list(f'__cov{i}' for i in range(len(covariates)))
row_fields = _get_regression_row_fields(mt, pass_through, 'linear_regression_rows')
# FIXME: selecting an existing entry field should be emitted as a SelectFields
mt = mt._select_all(col_exprs=dict(**y_dict,
**dict(zip(cov_field_names, covariates))),
row_exprs=row_fields,
col_key=[],
entry_exprs={x_field_name: x})
config = {
'name': func,
'yFields': y_field_names,
'xField': x_field_name,
'covFields': cov_field_names,
'rowBlockSize': block_size,
'passThrough': [x for x in row_fields if x not in mt.row_key]
}
ht_result = Table(ir.MatrixToTableApply(mt._mir, config))
if not y_is_list:
fields = ['y_transpose_x', 'beta', 'standard_error', 't_stat', 'p_value']
ht_result = ht_result.annotate(**{f: ht_result[f][0] for f in fields})
return ht_result.persist()
@typecheck(y=oneof(expr_float64, sequenceof(expr_float64), sequenceof(sequenceof(expr_float64))),
x=expr_float64,
covariates=sequenceof(expr_float64),
block_size=int,
pass_through=sequenceof(oneof(str, Expression)))
def _linear_regression_rows_nd(y, x, covariates, block_size=16, pass_through=()) -> hail.Table:
mt = matrix_table_source('linear_regression_rows_nd/x', x)
check_entry_indexed('linear_regression_rows_nd/x', x)
y_is_list = isinstance(y, list)
if y_is_list and len(y) == 0:
raise ValueError("'linear_regression_rows_nd': found no values for 'y'")
is_chained = y_is_list and isinstance(y[0], list)
if is_chained and any(len(lst) == 0 for lst in y):
raise ValueError("'linear_regression_rows': found empty inner list for 'y'")
y = wrap_to_list(y)
for e in (itertools.chain.from_iterable(y) if is_chained else y):
analyze('linear_regression_rows_nd/y', e, mt._col_indices)
for e in covariates:
analyze('linear_regression_rows_nd/covariates', e, mt._col_indices)
_warn_if_no_intercept('linear_regression_rows_nd', covariates)
x_field_name = Env.get_uid()
if is_chained:
y_field_name_groups = [[f'__y_{i}_{j}' for j in range(len(y[i]))] for i in range(len(y))]
y_dict = dict(zip(itertools.chain.from_iterable(y_field_name_groups), itertools.chain.from_iterable(y)))
else:
y_field_name_groups = list(f'__y_{i}' for i in range(len(y)))
y_dict = dict(zip(y_field_name_groups, y))
# Wrapping in a list since the code is written for the more general chained case.
y_field_name_groups = [y_field_name_groups]
cov_field_names = list(f'__cov{i}' for i in range(len(covariates)))
row_field_names = _get_regression_row_fields(mt, pass_through, 'linear_regression_rows_nd')
# FIXME: selecting an existing entry field should be emitted as a SelectFields
mt = mt._select_all(col_exprs=dict(**y_dict,
**dict(zip(cov_field_names, covariates))),
row_exprs=row_field_names,
col_key=[],
entry_exprs={x_field_name: x})
entries_field_name = 'ent'
sample_field_name = "by_sample"
num_y_lists = len(y_field_name_groups)
# Given a hail array, get the mean of the nonmissing entries and
    # return a new array where the missing entries are replaced by that mean.
def mean_impute(hl_array):
non_missing_mean = hl.mean(hl_array, filter_missing=True)
return hl_array.map(lambda entry: hl.if_else(hl.is_defined(entry), entry, non_missing_mean))
def select_array_indices(hl_array, indices):
return indices.map(lambda i: hl_array[i])
def dot_rows_with_themselves(matrix):
return (matrix * matrix).sum(1)
def no_missing(hail_array):
return hail_array.all(lambda element: hl.is_defined(element))
ht_local = mt._localize_entries(entries_field_name, sample_field_name)
ht = ht_local.transmute(**{entries_field_name: ht_local[entries_field_name][x_field_name]})
def setup_globals(ht):
# cov_arrays is per sample, then per cov.
if covariates:
ht = ht.annotate_globals(cov_arrays=ht[sample_field_name].map(lambda sample_struct: [sample_struct[cov_name] for cov_name in cov_field_names]))
else:
ht = ht.annotate_globals(cov_arrays=ht[sample_field_name].map(lambda sample_struct: hl.empty_array(hl.tfloat64)))
ht = ht.annotate_globals(
y_arrays_per_group=[ht[sample_field_name].map(lambda sample_struct: [sample_struct[y_name] for y_name in one_y_field_name_set]) for one_y_field_name_set in y_field_name_groups]
)
all_covs_defined = ht.cov_arrays.map(lambda sample_covs: no_missing(sample_covs))
def get_kept_samples(sample_ys):
# sample_ys is an array of samples, with each element being an array of the y_values
return hl.enumerate(sample_ys).filter(
lambda idx_and_y_values: all_covs_defined[idx_and_y_values[0]] & no_missing(idx_and_y_values[1])
).map(lambda idx_and_y_values: idx_and_y_values[0])
kept_samples = ht.y_arrays_per_group.map(get_kept_samples)
y_nds = hl.zip(kept_samples, ht.y_arrays_per_group).map(lambda sample_indices_and_y_arrays:
hl.nd.array(sample_indices_and_y_arrays[0].map(lambda idx:
sample_indices_and_y_arrays[1][idx])))
cov_nds = kept_samples.map(lambda group: hl.nd.array(group.map(lambda idx: ht.cov_arrays[idx])))
k = builtins.len(covariates)
ns = kept_samples.map(lambda one_sample_set: hl.len(one_sample_set))
cov_Qts = hl.if_else(k > 0,
cov_nds.map(lambda one_cov_nd: hl.nd.qr(one_cov_nd)[0].T),
ns.map(lambda n: hl.nd.zeros((0, n))))
Qtys = hl.zip(cov_Qts, y_nds).map(lambda cov_qt_and_y: cov_qt_and_y[0] @ cov_qt_and_y[1])
return ht.annotate_globals(
kept_samples=kept_samples,
__y_nds=y_nds,
ns=ns,
ds=ns.map(lambda n: n - k - 1),
__cov_Qts=cov_Qts,
__Qtys=Qtys,
__yyps=hl.range(num_y_lists).map(lambda i: dot_rows_with_themselves(y_nds[i].T) - dot_rows_with_themselves(Qtys[i].T)))
ht = setup_globals(ht)
def process_block(block):
rows_in_block = hl.len(block)
# Processes one block group based on given idx. Returns a single struct.
def process_y_group(idx):
X = hl.nd.array(block[entries_field_name].map(lambda row: mean_impute(select_array_indices(row, ht.kept_samples[idx])))).T
n = ht.ns[idx]
sum_x = X.sum(0)
Qtx = ht.__cov_Qts[idx] @ X
ytx = ht.__y_nds[idx].T @ X
xyp = ytx - (ht.__Qtys[idx].T @ Qtx)
xxpRec = (dot_rows_with_themselves(X.T) - dot_rows_with_themselves(Qtx.T)).map(lambda entry: 1 / entry)
b = xyp * xxpRec
se = ((1.0 / ht.ds[idx]) * (ht.__yyps[idx].reshape((-1, 1)) @ xxpRec.reshape((1, -1)) - (b * b))).map(lambda entry: hl.sqrt(entry))
t = b / se
return hl.rbind(t, lambda t:
hl.rbind(ht.ds[idx], lambda d:
hl.rbind(t.map(lambda entry: 2 * hl.expr.functions.pT(-hl.abs(entry), d, True, False)), lambda p:
hl.struct(n=hl.range(rows_in_block).map(lambda i: n), sum_x=sum_x._data_array(),
y_transpose_x=ytx.T._data_array(), beta=b.T._data_array(),
standard_error=se.T._data_array(), t_stat=t.T._data_array(),
p_value=p.T._data_array()))))
per_y_list = hl.range(num_y_lists).map(lambda i: process_y_group(i))
key_field_names = [key_field for key_field in ht.key]
def build_row(row_idx):
# For every field we care about, map across all y's, getting the row_idxth one from each.
idxth_keys = {field_name: block[field_name][row_idx] for field_name in key_field_names}
computed_row_field_names = ['n', 'sum_x', 'y_transpose_x', 'beta', 'standard_error', 't_stat', 'p_value']
computed_row_fields = {
field_name: per_y_list.map(lambda one_y: one_y[field_name][row_idx]) for field_name in computed_row_field_names
}
pass_through_rows = {
field_name: block[field_name][row_idx] for field_name in row_field_names
}
if not is_chained:
computed_row_fields = {key: value[0] for key, value in computed_row_fields.items()}
return hl.struct(**{**idxth_keys, **computed_row_fields, **pass_through_rows})
new_rows = hl.range(rows_in_block).map(build_row)
return new_rows
def process_partition(part):
grouped = part.grouped(block_size)
return grouped.flatmap(lambda block: process_block(block))
res = ht._map_partitions(process_partition)
if not y_is_list:
fields = ['y_transpose_x', 'beta', 'standard_error', 't_stat', 'p_value']
res = res.annotate(**{f: res[f][0] for f in fields})
res = res.select_globals()
return res
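# A dense NumPy sketch (illustrative, not the distributed implementation
# above) of the projection trick used in process_block: residualize y and x
# on the covariates via a QR decomposition, then compute beta, its standard
# error and the t-statistic with n - k - 1 degrees of freedom. scipy is
# assumed to be available for the t-distribution tail.
def _example_linreg_via_qr(y, x, cov):
    from scipy import stats
    n, k = cov.shape
    q, _ = np.linalg.qr(cov)            # orthonormal basis of the covariate space
    qty, qtx = q.T @ y, q.T @ x
    yyp = y @ y - qty @ qty             # squared norm of y residualized on cov
    xxp = x @ x - qtx @ qtx
    xyp = y @ x - qty @ qtx
    beta = xyp / xxp
    d = n - k - 1
    se = np.sqrt((yyp / xxp - beta ** 2) / d)
    t_stat = beta / se
    p_value = 2 * stats.t.sf(abs(t_stat), d)
    return beta, se, t_stat, p_value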
@typecheck(test=enumeration('wald', 'lrt', 'score', 'firth'),
y=oneof(expr_float64, sequenceof(expr_float64)),
x=expr_float64,
covariates=sequenceof(expr_float64),
pass_through=sequenceof(oneof(str, Expression)))
def logistic_regression_rows(test, y, x, covariates, pass_through=()) -> hail.Table:
r"""For each row, test an input variable for association with a
binary response variable using logistic regression.
Examples
--------
Run the logistic regression Wald test per variant using a Boolean
phenotype, intercept and two covariates stored in column-indexed
fields:
>>> result_ht = hl.logistic_regression_rows(
... test='wald',
... y=dataset.pheno.is_case,
... x=dataset.GT.n_alt_alleles(),
... covariates=[1, dataset.pheno.age, dataset.pheno.is_female])
Run the logistic regression Wald test per variant using a list of binary (0/1)
phenotypes, intercept and two covariates stored in column-indexed
fields:
>>> result_ht = hl.logistic_regression_rows(
... test='wald',
... y=[dataset.pheno.is_case, dataset.pheno.is_case], # where pheno values are 0, 1, or missing
... x=dataset.GT.n_alt_alleles(),
... covariates=[1, dataset.pheno.age, dataset.pheno.is_female])
Warning
-------
:func:`.logistic_regression_rows` considers the same set of
columns (i.e., samples, points) for every row, namely those columns for
which **all** response variables and covariates are defined. For each row, missing values of
`x` are mean-imputed over these columns. As in the example, the
intercept covariate ``1`` must be included **explicitly** if desired.
Notes
-----
This method performs, for each row, a significance test of the input
variable in predicting a binary (case-control) response variable based
on the logistic regression model. The response variable type must either
be numeric (with all present values 0 or 1) or Boolean, in which case
true and false are coded as 1 and 0, respectively.
Hail supports the Wald test ('wald'), likelihood ratio test ('lrt'),
Rao score test ('score'), and Firth test ('firth'). Hail only includes
columns for which the response variable and all covariates are defined.
For each row, Hail imputes missing input values as the mean of the
non-missing values.
The example above considers a model of the form
.. math::
\mathrm{Prob}(\mathrm{is\_case}) =
\mathrm{sigmoid}(\beta_0 + \beta_1 \, \mathrm{gt}
+ \beta_2 \, \mathrm{age}
+ \beta_3 \, \mathrm{is\_female} + \varepsilon),
\quad
\varepsilon \sim \mathrm{N}(0, \sigma^2)
where :math:`\mathrm{sigmoid}` is the `sigmoid function`_, the genotype
:math:`\mathrm{gt}` is coded as 0 for HomRef, 1 for Het, and 2 for
HomVar, and the Boolean covariate :math:`\mathrm{is\_female}` is coded as
    1 for ``True`` (female) and 0 for ``False`` (male). The null model sets
:math:`\beta_1 = 0`.
.. _sigmoid function: https://en.wikipedia.org/wiki/Sigmoid_function
The structure of the emitted row field depends on the test statistic as
shown in the tables below.
========== ================== ======= ============================================
Test Field Type Value
========== ================== ======= ============================================
Wald `beta` float64 fit effect coefficient,
:math:`\hat\beta_1`
Wald `standard_error` float64 estimated standard error,
:math:`\widehat{\mathrm{se}}`
Wald `z_stat` float64 Wald :math:`z`-statistic, equal to
:math:`\hat\beta_1 / \widehat{\mathrm{se}}`
Wald `p_value` float64 Wald p-value testing :math:`\beta_1 = 0`
LRT, Firth `beta` float64 fit effect coefficient,
:math:`\hat\beta_1`
LRT, Firth `chi_sq_stat` float64 deviance statistic
LRT, Firth `p_value` float64 LRT / Firth p-value testing
:math:`\beta_1 = 0`
Score `chi_sq_stat` float64 score statistic
Score `p_value` float64 score p-value testing :math:`\beta_1 = 0`
========== ================== ======= ============================================
For the Wald and likelihood ratio tests, Hail fits the logistic model for
each row using Newton iteration and only emits the above fields
when the maximum likelihood estimate of the coefficients converges. The
Firth test uses a modified form of Newton iteration. To help diagnose
convergence issues, Hail also emits three fields which summarize the
iterative fitting process:
================ =================== ======= ===============================
Test Field Type Value
================ =================== ======= ===============================
Wald, LRT, Firth `fit.n_iterations` int32 number of iterations until
convergence, explosion, or
reaching the max (25 for
Wald, LRT; 100 for Firth)
Wald, LRT, Firth `fit.converged` bool ``True`` if iteration converged
Wald, LRT, Firth `fit.exploded` bool ``True`` if iteration exploded
================ =================== ======= ===============================
We consider iteration to have converged when every coordinate of
:math:`\beta` changes by less than :math:`10^{-6}`. For Wald and LRT,
up to 25 iterations are attempted; in testing we find 4 or 5 iterations
nearly always suffice. Convergence may also fail due to explosion,
which refers to low-level numerical linear algebra exceptions caused by
manipulating ill-conditioned matrices. Explosion may result from (nearly)
linearly dependent covariates or complete separation_.
.. _separation: https://en.wikipedia.org/wiki/Separation_(statistics)
    A more common situation in genetics is quasi-complete separation, e.g.
variants that are observed only in cases (or controls). Such variants
inevitably arise when testing millions of variants with very low minor
allele count. The maximum likelihood estimate of :math:`\beta` under
logistic regression is then undefined but convergence may still occur
after a large number of iterations due to a very flat likelihood
surface. In testing, we find that such variants produce a secondary bump
from 10 to 15 iterations in the histogram of number of iterations per
variant. We also find that this faux convergence produces large standard
    errors and large (insignificant) p-values. To avoid missing such variants,
consider using Firth logistic regression, linear regression, or
group-based tests.
    Here's a concrete illustration of quasi-complete separation in R. Suppose
we have 2010 samples distributed as follows for a particular variant:
======= ====== === ======
Status HomRef Het HomVar
======= ====== === ======
Case 1000 10 0
Control 1000 0 0
======= ====== === ======
The following R code fits the (standard) logistic, Firth logistic,
and linear regression models to this data, where ``x`` is genotype,
``y`` is phenotype, and ``logistf`` is from the logistf package:
.. code-block:: R
        x <- c(rep(0,1000), rep(1,1000), rep(1,10))
y <- c(rep(0,1000), rep(0,1000), rep(1,10))
logfit <- glm(y ~ x, family=binomial())
firthfit <- logistf(y ~ x)
linfit <- lm(y ~ x)
The resulting p-values for the genotype coefficient are 0.991, 0.00085,
and 0.0016, respectively. The erroneous value 0.991 is due to
quasi-complete separation. Moving one of the 10 hets from case to control
eliminates this quasi-complete separation; the p-values from R are then
0.0373, 0.0111, and 0.0116, respectively, as expected for a less
significant association.
The Firth test reduces bias from small counts and resolves the issue of
    separation by penalizing maximum likelihood estimation by the `Jeffreys
invariant prior <https://en.wikipedia.org/wiki/Jeffreys_prior>`__. This
test is slower, as both the null and full model must be fit per variant,
and convergence of the modified Newton method is linear rather than
quadratic. For Firth, 100 iterations are attempted for the null model
and, if that is successful, for the full model as well. In testing we
    find 20 iterations nearly always suffice. If the null model fails to
converge, then the `logreg.fit` fields reflect the null model;
otherwise, they reflect the full model.
See
`Recommended joint and meta-analysis strategies for case-control association testing of single low-count variants <http://www.ncbi.nlm.nih.gov/pmc/articles/PMC4049324/>`__
for an empirical comparison of the logistic Wald, LRT, score, and Firth
tests. The theoretical foundations of the Wald, likelihood ratio, and score
tests may be found in Chapter 3 of Gesine Reinert's notes
`Statistical Theory <http://www.stats.ox.ac.uk/~reinert/stattheory/theoryshort09.pdf>`__.
Firth introduced his approach in
`Bias reduction of maximum likelihood estimates, 1993 <http://www2.stat.duke.edu/~scs/Courses/Stat376/Papers/GibbsFieldEst/BiasReductionMLE.pdf>`__.
Heinze and Schemper further analyze Firth's approach in
`A solution to the problem of separation in logistic regression, 2002 <https://cemsiis.meduniwien.ac.at/fileadmin/msi_akim/CeMSIIS/KB/volltexte/Heinze_Schemper_2002_Statistics_in_Medicine.pdf>`__.
Hail's logistic regression tests correspond to the ``b.wald``,
``b.lrt``, and ``b.score`` tests in `EPACTS`_. For each variant, Hail
imputes missing input values as the mean of non-missing input values,
whereas EPACTS subsets to those samples with called genotypes. Hence,
Hail and EPACTS results will currently only agree for variants with no
missing genotypes.
.. _EPACTS: http://genome.sph.umich.edu/wiki/EPACTS#Single_Variant_Tests
Note
----
Use the `pass_through` parameter to include additional row fields from
matrix table underlying ``x``. For example, to include an "rsid" field, set
``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.
Parameters
----------
test : {'wald', 'lrt', 'score', 'firth'}
Statistical test.
y : :class:`.Float64Expression` or :obj:`list` of :class:`.Float64Expression`
One or more column-indexed response expressions.
All non-missing values must evaluate to 0 or 1.
Note that a :class:`.BooleanExpression` will be implicitly converted to
a :class:`.Float64Expression` with this property.
x : :class:`.Float64Expression`
Entry-indexed expression for input variable.
covariates : :obj:`list` of :class:`.Float64Expression`
Non-empty list of column-indexed covariate expressions.
pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
Additional row fields to include in the resulting table.
Returns
-------
:class:`.Table`
"""
if len(covariates) == 0:
raise ValueError('logistic regression requires at least one covariate expression')
    mt = matrix_table_source('logistic_regression_rows/x', x)
    check_entry_indexed('logistic_regression_rows/x', x)
y_is_list = isinstance(y, list)
if y_is_list and len(y) == 0:
raise ValueError("'logistic_regression_rows': found no values for 'y'")
y = wrap_to_list(y)
for e in covariates:
analyze('logistic_regression_rows/covariates', e, mt._col_indices)
_warn_if_no_intercept('logistic_regression_rows', covariates)
x_field_name = Env.get_uid()
y_field = [f'__y_{i}' for i in range(len(y))]
y_dict = dict(zip(y_field, y))
cov_field_names = [f'__cov{i}' for i in range(len(covariates))]
row_fields = _get_regression_row_fields(mt, pass_through, 'logistic_regression_rows')
# FIXME: selecting an existing entry field should be emitted as a SelectFields
mt = mt._select_all(col_exprs=dict(**y_dict,
**dict(zip(cov_field_names, covariates))),
row_exprs=row_fields,
col_key=[],
entry_exprs={x_field_name: x})
config = {
'name': 'LogisticRegression',
'test': test,
'yFields': y_field,
'xField': x_field_name,
'covFields': cov_field_names,
'passThrough': [x for x in row_fields if x not in mt.row_key]
}
result = Table(ir.MatrixToTableApply(mt._mir, config))
if not y_is_list:
result = result.transmute(**result.logistic_regression[0])
return result.persist()
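# Illustrative follow-up to the doctests above (field names as emitted for a
# single phenotype with the Wald test; 5e-8 is the conventional genome-wide
# significance threshold):
#
#     significant = result_ht.filter(result_ht.p_value < 5e-8)
#     significant.select('beta', 'standard_error', 'p_value').show()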
# Helpers for logreg:
def mean_impute(hl_array):
non_missing_mean = hl.mean(hl_array, filter_missing=True)
return hl_array.map(lambda entry: hl.if_else(hl.is_defined(entry), entry, non_missing_mean))
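# For example (illustrative; assumes an initialized Hail context):
#
#     hl.eval(mean_impute(hl.array([1.0, hl.missing(hl.tfloat64), 3.0])))
#     # -> [1.0, 2.0, 3.0]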
def sigmoid(hl_nd):
return hl_nd.map(lambda x: hl.if_else(x > 0, hl.rbind(hl.exp(x), lambda exped: exped / (exped + 1)), 1 / (1 + hl.exp(-x))))
def nd_max(hl_nd):
return hl.max(hl_nd.reshape(-1)._data_array())
def logreg_fit(X, y, null_fit=None, max_iter=25, tol=1E-6):
assert(X.ndim == 2)
assert(y.ndim == 1)
# X is samples by covs.
# y is length num samples, for one cov.
n = X.shape[0]
m = X.shape[1]
if null_fit is None:
avg = y.sum() / n
logit_avg = hl.log(avg / (1 - avg))
b = hl.nd.hstack([hl.nd.array([logit_avg]), hl.nd.zeros((hl.int32(m - 1)))])
mu = sigmoid(X @ b)
score = X.T @ (y - mu)
# Reshape so we do a rowwise multiply
fisher = X.T @ (X * (mu * (1 - mu)).reshape(-1, 1))
else:
# num covs used to fit null model.
m0 = null_fit.b.shape[0]
m_diff = m - m0
X0 = X[:, 0:m0]
X1 = X[:, m0:]
b = hl.nd.hstack([null_fit.b, hl.nd.zeros((m_diff,))])
mu = sigmoid(X @ b)
score = hl.nd.hstack([null_fit.score, X1.T @ (y - mu)])
fisher00 = null_fit.fisher
fisher01 = X0.T @ (X1 * (mu * (1 - mu)).reshape(-1, 1))
fisher10 = fisher01.T
fisher11 = X1.T @ (X1 * (mu * (1 - mu)).reshape(-1, 1))
fisher = hl.nd.vstack([
hl.nd.hstack([fisher00, fisher01]),
hl.nd.hstack([fisher10, fisher11])
])
# Useful type abbreviations
tvector64 = hl.tndarray(hl.tfloat64, 1)
tmatrix64 = hl.tndarray(hl.tfloat64, 2)
search_return_type = hl.tstruct(b=tvector64, score=tvector64, fisher=tmatrix64, num_iter=hl.tint32, log_lkhd=hl.tfloat64, converged=hl.tbool, exploded=hl.tbool)
def na(field_name):
return hl.missing(search_return_type[field_name])
# Need to do looping now.
def search(recur, cur_iter, b, mu, score, fisher):
delta_b_struct = hl.nd.solve(fisher, score, no_crash=True)
exploded = delta_b_struct.failed
delta_b = delta_b_struct.solution
max_delta_b = nd_max(delta_b.map(lambda e: hl.abs(e)))
log_lkhd = ((y * mu) + (1 - y) * (1 - mu)).map(lambda e: hl.log(e)).sum()
def compute_next_iter(cur_iter, b, mu, score, fisher):
cur_iter = cur_iter + 1
b = b + delta_b
mu = sigmoid(X @ b)
score = X.T @ (y - mu)
fisher = X.T @ (X * (mu * (1 - mu)).reshape(-1, 1))
return recur(cur_iter, b, mu, score, fisher)
return (hl.case()
.when(exploded | hl.is_nan(delta_b[0]), hl.struct(b=na('b'), score=na('score'), fisher=na('fisher'), num_iter=cur_iter, log_lkhd=log_lkhd, converged=False, exploded=True))
.when(cur_iter > max_iter, hl.struct(b=na('b'), score=na('score'), fisher=na('fisher'), num_iter=cur_iter, log_lkhd=log_lkhd, converged=False, exploded=False))
.when(max_delta_b < tol, hl.struct(b=b, score=score, fisher=fisher, num_iter=cur_iter, log_lkhd=log_lkhd, converged=True, exploded=False))
.default(compute_next_iter(cur_iter, b, mu, score, fisher)))
res_struct = hl.experimental.loop(search, search_return_type, 1, b, mu, score, fisher)
return res_struct
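# A minimal NumPy sketch (illustrative only) of the Fisher scoring / Newton
# update that logreg_fit expresses with Hail ndarrays above: iterate
# b <- b + fisher^{-1} @ score until the largest coordinate change is below
# tol, assuming a dense design matrix X (first column an intercept) and a
# 0/1 response y.
def _example_numpy_logreg_fit(X, y, max_iter=25, tol=1e-6):
    m = X.shape[1]
    b = np.zeros(m)
    b[0] = np.log(y.mean() / (1 - y.mean()))    # same intercept start as above
    for num_iter in range(1, max_iter + 1):
        mu = 1 / (1 + np.exp(-(X @ b)))
        score = X.T @ (y - mu)
        fisher = X.T @ (X * (mu * (1 - mu))[:, None])
        delta_b = np.linalg.solve(fisher, score)
        b = b + delta_b
        if np.max(np.abs(delta_b)) < tol:
            return b, num_iter, True            # converged
    return b, max_iter, False                   # hit the iteration cap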
def wald_test(X, y, null_fit, link):
assert (link == "logistic")
fit = logreg_fit(X, y, null_fit)
se = hl.nd.diagonal(hl.nd.inv(fit.fisher)).map(lambda e: hl.sqrt(e))
z = fit.b / se
p = z.map(lambda e: 2 * hl.pnorm(-hl.abs(e)))
return hl.struct(
beta=fit.b[X.shape[1] - 1],
standard_error=se[X.shape[1] - 1],
z_stat=z[X.shape[1] - 1],
p_value=p[X.shape[1] - 1],
fit=hl.struct(n_iterations=fit.num_iter, converged=fit.converged, exploded=fit.exploded))
def lrt_test(X, y, null_fit, link):
assert (link == "logistic")
fit = logreg_fit(X, y, null_fit)
chi_sq = hl.if_else(~fit.converged, hl.missing(hl.tfloat64), 2 * (fit.log_lkhd - null_fit.log_lkhd))
p = hl.pchisqtail(chi_sq, X.shape[1] - null_fit.b.shape[0])
return hl.struct(
beta=fit.b[X.shape[1] - 1],
chi_sq_stat=chi_sq,
p_value=p,
fit=hl.struct(n_iterations=fit.num_iter, converged=fit.converged, exploded=fit.exploded))
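# Illustrative sketch of how the Wald and LRT results above become p-values,
# using plain floats (scipy assumed; the normal tail mirrors hl.pnorm and the
# chi-square tail mirrors hl.pchisqtail):
def _example_test_p_values(beta, se, log_lkhd_full, log_lkhd_null, df_diff):
    from scipy import stats
    z = beta / se
    wald_p = 2 * stats.norm.sf(abs(z))              # Wald: z = beta / se
    chi_sq = 2 * (log_lkhd_full - log_lkhd_null)    # LRT deviance statistic
    lrt_p = stats.chi2.sf(chi_sq, df_diff)          # df = number of extra parameters tested
    return wald_p, lrt_p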
@typecheck(test=enumeration('wald', 'lrt', 'score', 'firth'),
y=oneof(expr_float64, sequenceof(expr_float64)),
x=expr_float64,
covariates=sequenceof(expr_float64),
pass_through=sequenceof(oneof(str, Expression)))
def _logistic_regression_rows_nd(test, y, x, covariates, pass_through=()) -> hail.Table:
r"""For each row, test an input variable for association with a
binary response variable using logistic regression.
Examples
--------
Run the logistic regression Wald test per variant using a Boolean
phenotype, intercept and two covariates stored in column-indexed
fields:
>>> result_ht = hl.logistic_regression_rows(
... test='wald',
... y=dataset.pheno.is_case,
... x=dataset.GT.n_alt_alleles(),
... covariates=[1, dataset.pheno.age, dataset.pheno.is_female])
Run the logistic regression Wald test per variant using a list of binary (0/1)
phenotypes, intercept and two covariates stored in column-indexed
fields:
>>> result_ht = hl.logistic_regression_rows(
... test='wald',
... y=[dataset.pheno.is_case, dataset.pheno.is_case], # where pheno values are 0, 1, or missing
... x=dataset.GT.n_alt_alleles(),
... covariates=[1, dataset.pheno.age, dataset.pheno.is_female])
Warning
-------
:func:`.logistic_regression_rows` considers the same set of
columns (i.e., samples, points) for every row, namely those columns for
which **all** response variables and covariates are defined. For each row, missing values of
`x` are mean-imputed over these columns. As in the example, the
intercept covariate ``1`` must be included **explicitly** if desired.
Notes
-----
This method performs, for each row, a significance test of the input
variable in predicting a binary (case-control) response variable based
on the logistic regression model. The response variable type must either
be numeric (with all present values 0 or 1) or Boolean, in which case
true and false are coded as 1 and 0, respectively.
Hail supports the Wald test ('wald'), likelihood ratio test ('lrt'),
Rao score test ('score'), and Firth test ('firth'). Hail only includes
columns for which the response variable and all covariates are defined.
For each row, Hail imputes missing input values as the mean of the
non-missing values.
The example above considers a model of the form
.. math::
\mathrm{Prob}(\mathrm{is\_case}) =
\mathrm{sigmoid}(\beta_0 + \beta_1 \, \mathrm{gt}
+ \beta_2 \, \mathrm{age}
+ \beta_3 \, \mathrm{is\_female} + \varepsilon),
\quad
\varepsilon \sim \mathrm{N}(0, \sigma^2)
where :math:`\mathrm{sigmoid}` is the `sigmoid function`_, the genotype
:math:`\mathrm{gt}` is coded as 0 for HomRef, 1 for Het, and 2 for
HomVar, and the Boolean covariate :math:`\mathrm{is\_female}` is coded as
    1 for ``True`` (female) and 0 for ``False`` (male). The null model sets
:math:`\beta_1 = 0`.
.. _sigmoid function: https://en.wikipedia.org/wiki/Sigmoid_function
The structure of the emitted row field depends on the test statistic as
shown in the tables below.
========== ================== ======= ============================================
Test Field Type Value
========== ================== ======= ============================================
Wald `beta` float64 fit effect coefficient,
:math:`\hat\beta_1`
Wald `standard_error` float64 estimated standard error,
:math:`\widehat{\mathrm{se}}`
Wald `z_stat` float64 Wald :math:`z`-statistic, equal to
:math:`\hat\beta_1 / \widehat{\mathrm{se}}`
Wald `p_value` float64 Wald p-value testing :math:`\beta_1 = 0`
LRT, Firth `beta` float64 fit effect coefficient,
:math:`\hat\beta_1`
LRT, Firth `chi_sq_stat` float64 deviance statistic
LRT, Firth `p_value` float64 LRT / Firth p-value testing
:math:`\beta_1 = 0`
Score `chi_sq_stat` float64 score statistic
Score `p_value` float64 score p-value testing :math:`\beta_1 = 0`
========== ================== ======= ============================================
For the Wald and likelihood ratio tests, Hail fits the logistic model for
each row using Newton iteration and only emits the above fields
when the maximum likelihood estimate of the coefficients converges. The
Firth test uses a modified form of Newton iteration. To help diagnose
convergence issues, Hail also emits three fields which summarize the
iterative fitting process:
================ =================== ======= ===============================
Test Field Type Value
================ =================== ======= ===============================
Wald, LRT, Firth `fit.n_iterations` int32 number of iterations until
convergence, explosion, or
reaching the max (25 for
Wald, LRT; 100 for Firth)
Wald, LRT, Firth `fit.converged` bool ``True`` if iteration converged
Wald, LRT, Firth `fit.exploded` bool ``True`` if iteration exploded
================ =================== ======= ===============================
We consider iteration to have converged when every coordinate of
:math:`\beta` changes by less than :math:`10^{-6}`. For Wald and LRT,
up to 25 iterations are attempted; in testing we find 4 or 5 iterations
nearly always suffice. Convergence may also fail due to explosion,
which refers to low-level numerical linear algebra exceptions caused by
manipulating ill-conditioned matrices. Explosion may result from (nearly)
linearly dependent covariates or complete separation_.
.. _separation: https://en.wikipedia.org/wiki/Separation_(statistics)
    A more common situation in genetics is quasi-complete separation, e.g.
variants that are observed only in cases (or controls). Such variants
inevitably arise when testing millions of variants with very low minor
allele count. The maximum likelihood estimate of :math:`\beta` under
logistic regression is then undefined but convergence may still occur
after a large number of iterations due to a very flat likelihood
surface. In testing, we find that such variants produce a secondary bump
from 10 to 15 iterations in the histogram of number of iterations per
variant. We also find that this faux convergence produces large standard
    errors and large (insignificant) p-values. To avoid missing such variants,
consider using Firth logistic regression, linear regression, or
group-based tests.
    Here's a concrete illustration of quasi-complete separation in R. Suppose
we have 2010 samples distributed as follows for a particular variant:
======= ====== === ======
Status HomRef Het HomVar
======= ====== === ======
Case 1000 10 0
Control 1000 0 0
======= ====== === ======
The following R code fits the (standard) logistic, Firth logistic,
and linear regression models to this data, where ``x`` is genotype,
``y`` is phenotype, and ``logistf`` is from the logistf package:
.. code-block:: R
        x <- c(rep(0,1000), rep(1,1000), rep(1,10))
y <- c(rep(0,1000), rep(0,1000), rep(1,10))
logfit <- glm(y ~ x, family=binomial())
firthfit <- logistf(y ~ x)
linfit <- lm(y ~ x)
The resulting p-values for the genotype coefficient are 0.991, 0.00085,
and 0.0016, respectively. The erroneous value 0.991 is due to
quasi-complete separation. Moving one of the 10 hets from case to control
eliminates this quasi-complete separation; the p-values from R are then
0.0373, 0.0111, and 0.0116, respectively, as expected for a less
significant association.
The Firth test reduces bias from small counts and resolves the issue of
    separation by penalizing maximum likelihood estimation by the `Jeffreys
invariant prior <https://en.wikipedia.org/wiki/Jeffreys_prior>`__. This
test is slower, as both the null and full model must be fit per variant,
and convergence of the modified Newton method is linear rather than
quadratic. For Firth, 100 iterations are attempted for the null model
and, if that is successful, for the full model as well. In testing we
    find 20 iterations nearly always suffice. If the null model fails to
converge, then the `logreg.fit` fields reflect the null model;
otherwise, they reflect the full model.
See
`Recommended joint and meta-analysis strategies for case-control association testing of single low-count variants <http://www.ncbi.nlm.nih.gov/pmc/articles/PMC4049324/>`__
for an empirical comparison of the logistic Wald, LRT, score, and Firth
tests. The theoretical foundations of the Wald, likelihood ratio, and score
tests may be found in Chapter 3 of Gesine Reinert's notes
`Statistical Theory <http://www.stats.ox.ac.uk/~reinert/stattheory/theoryshort09.pdf>`__.
Firth introduced his approach in
`Bias reduction of maximum likelihood estimates, 1993 <http://www2.stat.duke.edu/~scs/Courses/Stat376/Papers/GibbsFieldEst/BiasReductionMLE.pdf>`__.
Heinze and Schemper further analyze Firth's approach in
`A solution to the problem of separation in logistic regression, 2002 <https://cemsiis.meduniwien.ac.at/fileadmin/msi_akim/CeMSIIS/KB/volltexte/Heinze_Schemper_2002_Statistics_in_Medicine.pdf>`__.
Hail's logistic regression tests correspond to the ``b.wald``,
``b.lrt``, and ``b.score`` tests in `EPACTS`_. For each variant, Hail
imputes missing input values as the mean of non-missing input values,
whereas EPACTS subsets to those samples with called genotypes. Hence,
Hail and EPACTS results will currently only agree for variants with no
missing genotypes.
.. _EPACTS: http://genome.sph.umich.edu/wiki/EPACTS#Single_Variant_Tests
Note
----
Use the `pass_through` parameter to include additional row fields from
matrix table underlying ``x``. For example, to include an "rsid" field, set
``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.
Parameters
----------
test : {'wald', 'lrt', 'score', 'firth'}
Statistical test.
y : :class:`.Float64Expression` or :obj:`list` of :class:`.Float64Expression`
One or more column-indexed response expressions.
All non-missing values must evaluate to 0 or 1.
Note that a :class:`.BooleanExpression` will be implicitly converted to
a :class:`.Float64Expression` with this property.
x : :class:`.Float64Expression`
Entry-indexed expression for input variable.
covariates : :obj:`list` of :class:`.Float64Expression`
Non-empty list of column-indexed covariate expressions.
pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
Additional row fields to include in the resulting table.
Returns
-------
:class:`.Table`
"""
if len(covariates) == 0:
raise ValueError('logistic regression requires at least one covariate expression')
    mt = matrix_table_source('logistic_regression_rows/x', x)
    check_entry_indexed('logistic_regression_rows/x', x)
y_is_list = isinstance(y, list)
if y_is_list and len(y) == 0:
raise ValueError("'logistic_regression_rows': found no values for 'y'")
y = wrap_to_list(y)
for e in covariates:
analyze('logistic_regression_rows/covariates', e, mt._col_indices)
# _warn_if_no_intercept('logistic_regression_rows', covariates)
x_field_name = Env.get_uid()
y_field_names = [f'__y_{i}' for i in range(len(y))]
num_y_fields = len(y_field_names)
y_dict = dict(zip(y_field_names, y))
cov_field_names = [f'__cov{i}' for i in range(len(covariates))]
row_fields = _get_regression_row_fields(mt, pass_through, 'logistic_regression_rows')
# Handle filtering columns with missing values:
mt = mt.filter_cols(hl.array(y + covariates).all(hl.is_defined))
# FIXME: selecting an existing entry field should be emitted as a SelectFields
mt = mt._select_all(col_exprs=dict(**y_dict,
**dict(zip(cov_field_names, covariates))),
row_exprs=row_fields,
col_key=[],
entry_exprs={x_field_name: x})
sample_field_name = "samples"
ht = mt._localize_entries("entries", sample_field_name)
# cov_nd rows are samples, columns are the different covariates
if covariates:
ht = ht.annotate_globals(cov_nd=hl.nd.array(ht[sample_field_name].map(lambda sample_struct: [sample_struct[cov_name] for cov_name in cov_field_names])))
else:
ht = ht.annotate_globals(cov_nd=hl.nd.array(ht[sample_field_name].map(lambda sample_struct: hl.empty_array(hl.tfloat64))))
# y_nd rows are samples, columns are the various dependent variables.
ht = ht.annotate_globals(y_nd=hl.nd.array(ht[sample_field_name].map(lambda sample_struct: [sample_struct[y_name] for y_name in y_field_names])))
# Fit null models, which means doing a logreg fit with just the covariates for each phenotype.
null_models = hl.range(num_y_fields).map(lambda idx: logreg_fit(ht.cov_nd, ht.y_nd[:, idx]))
ht = ht.annotate_globals(nulls=null_models)
ht = ht.transmute(x=hl.nd.array(mean_impute(ht.entries[x_field_name])))
if test == "wald":
# For each y vector, need to do wald test.
covs_and_x = hl.nd.hstack([ht.cov_nd, ht.x.reshape((-1, 1))])
wald_structs = hl.range(num_y_fields).map(lambda idx: wald_test(covs_and_x, ht.y_nd[:, idx], ht.nulls[idx], "logistic"))
ht = ht.annotate(logistic_regression=wald_structs)
elif test == "lrt":
covs_and_x = hl.nd.hstack([ht.cov_nd, ht.x.reshape((-1, 1))])
lrt_structs = hl.range(num_y_fields).map(lambda idx: lrt_test(covs_and_x, ht.y_nd[:, idx], ht.nulls[idx], "logistic"))
ht = ht.annotate(logistic_regression=lrt_structs)
else:
raise ValueError("Only support wald and lrt so far")
if not y_is_list:
ht = ht.transmute(**ht.logistic_regression[0])
ht = ht.drop("x")
return ht
@typecheck(test=enumeration('wald', 'lrt', 'score'),
y=expr_float64,
x=expr_float64,
covariates=sequenceof(expr_float64),
pass_through=sequenceof(oneof(str, Expression)))
def poisson_regression_rows(test, y, x, covariates, pass_through=()) -> Table:
r"""For each row, test an input variable for association with a
count response variable using `Poisson regression <https://en.wikipedia.org/wiki/Poisson_regression>`__.
Notes
-----
See :func:`.logistic_regression_rows` for more info on statistical tests
of general linear models.
Note
----
Use the `pass_through` parameter to include additional row fields from
the matrix table underlying ``x``. For example, to include an "rsid" field, set
``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.
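A minimal usage sketch (the count phenotype and covariate fields below are
hypothetical):
>>> result_ht = hl.poisson_regression_rows(  # doctest: +SKIP
...     test='wald',
...     y=dataset.pheno.n_hospital_visits,
...     x=dataset.GT.n_alt_alleles(),
...     covariates=[1, dataset.pheno.age],
...     pass_through=['rsid'])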
Parameters
----------
y : :class:`.Float64Expression`
Column-indexed response expression.
All non-missing values must evaluate to a non-negative integer.
x : :class:`.Float64Expression`
Entry-indexed expression for input variable.
covariates : :obj:`list` of :class:`.Float64Expression`
Non-empty list of column-indexed covariate expressions.
pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
Additional row fields to include in the resulting table.
Returns
-------
:class:`.Table`
"""
if len(covariates) == 0:
raise ValueError('Poisson regression requires at least one covariate expression')
mt = matrix_table_source('poisson_regression_rows/x', x)
check_entry_indexed('poisson_regression_rows/x', x)
analyze('poisson_regression_rows/y', y, mt._col_indices)
all_exprs = [y]
for e in covariates:
all_exprs.append(e)
analyze('poisson_regression_rows/covariates', e, mt._col_indices)
_warn_if_no_intercept('poisson_regression_rows', covariates)
x_field_name = Env.get_uid()
y_field_name = '__y'
cov_field_names = list(f'__cov{i}' for i in range(len(covariates)))
row_fields = _get_regression_row_fields(mt, pass_through, 'poisson_regression_rows')
# FIXME: selecting an existing entry field should be emitted as a SelectFields
mt = mt._select_all(col_exprs=dict(**{y_field_name: y},
**dict(zip(cov_field_names, covariates))),
row_exprs=row_fields,
col_key=[],
entry_exprs={x_field_name: x})
config = {
'name': 'PoissonRegression',
'test': test,
'yField': y_field_name,
'xField': x_field_name,
'covFields': cov_field_names,
'passThrough': [x for x in row_fields if x not in mt.row_key]
}
return Table(ir.MatrixToTableApply(mt._mir, config)).persist()
@typecheck(y=expr_float64,
x=sequenceof(expr_float64),
z_t=nullable(expr_float64),
k=nullable(np.ndarray),
p_path=nullable(str),
overwrite=bool,
standardize=bool,
mean_impute=bool)
def linear_mixed_model(y,
x,
z_t=None,
k=None,
p_path=None,
overwrite=False,
standardize=True,
mean_impute=True):
r"""Initialize a linear mixed model from a matrix table.
Examples
--------
Initialize a model using three fixed effects (including intercept) and
genetic marker random effects:
>>> marker_ds = dataset.filter_rows(dataset.use_as_marker) # doctest: +SKIP
>>> model, _ = hl.linear_mixed_model( # doctest: +SKIP
... y=marker_ds.pheno.height,
... x=[1, marker_ds.pheno.age, marker_ds.pheno.is_female],
... z_t=marker_ds.GT.n_alt_alleles(),
... p_path='output/p.bm')
Fit the model and examine :math:`h^2`:
>>> model.fit() # doctest: +SKIP
>>> model.h_sq # doctest: +SKIP
Sanity-check the normalized likelihood of :math:`h^2` over the percentile
grid:
>>> import matplotlib.pyplot as plt # doctest: +SKIP
>>> plt.plot(range(101), model.h_sq_normalized_lkhd()) # doctest: +SKIP
For this value of :math:`h^2`, test each variant for association:
>>> result_table = hl.linear_mixed_regression_rows(dataset.GT.n_alt_alleles(), model) # doctest: +SKIP
Alternatively, one can define a full-rank model using a pre-computed kinship
matrix :math:`K` in ndarray form. When :math:`K` is the realized
relationship matrix defined by the genetic markers, we obtain the same model
as above with :math:`P` written as a block matrix but returned as an
ndarray:
>>> rrm = hl.realized_relationship_matrix(marker_ds.GT).to_numpy() # doctest: +SKIP
>>> model, p = hl.linear_mixed_model( # doctest: +SKIP
... y=dataset.pheno.height,
... x=[1, dataset.pheno.age, dataset.pheno.is_female],
... k=rrm,
... p_path='output/p.bm',
... overwrite=True)
Notes
-----
See :class:`.LinearMixedModel` for details on the model and notation.
Exactly one of `z_t` and `k` must be set.
If `z_t` is set, the model is low-rank if the number of samples :math:`n` exceeds
the number of random effects :math:`m`. At least one dimension must be less
than or equal to 46300. If `standardize` is true, each random effect is first
standardized to have mean 0 and variance :math:`\frac{1}{m}`, so that the
diagonal values of the kinship matrix :math:`K = ZZ^T` are 1.0 in
expectation. This kinship matrix corresponds to the
:meth:`realized_relationship_matrix` in genetics. See
:meth:`.LinearMixedModel.from_random_effects` and :meth:`.BlockMatrix.svd`
for more details.
If `k` is set, the model is full-rank. For correct results, the indices of
`k` **must be aligned** with columns of the source of `y`.
Set `p_path` if you plan to use the model in :func:`.linear_mixed_regression_rows`.
`k` must be positive semi-definite; symmetry is not checked as only the
lower triangle is used. See :meth:`.LinearMixedModel.from_kinship` for more
details.
Missing, nan, or infinite values in `y` or `x` will raise an error.
If set, `z_t` may only have missing values if `mean_impute` is true, in
which case missing values are set to the row mean. We recommend setting
`mean_impute` to false if you expect no missing values, both for performance
and as a sanity check.
Warning
-------
If the rows of the matrix table have been filtered to a small fraction,
then run :meth:`.MatrixTable.repartition` before this method to improve
performance.
Parameters
----------
y: :class:`.Float64Expression`
Column-indexed expression for the observations (rows of :math:`y`).
Must have no missing values.
x: :obj:`list` of :class:`.Float64Expression`
Non-empty list of column-indexed expressions for the fixed effects (rows of :math:`X`).
Each expression must have the same source as `y` or no source
(e.g., the intercept ``1.0``).
Must have no missing values.
z_t: :class:`.Float64Expression`, optional
Entry-indexed expression for each mixed effect. These values are
row-standardized to variance :math:`1 / m` to form the entries of
:math:`Z^T`. If `mean_impute` is false, must have no missing values.
Exactly one of `z_t` and `k` must be set.
k: :class:`numpy.ndarray`, optional
Kinship matrix :math:`K`.
Exactly one of `z_t` and `k` must be set.
p_path: :class:`str`, optional
Path at which to write the projection :math:`P` as a block matrix.
Required if `z_t` is set.
overwrite: :obj:`bool`
If ``True``, overwrite an existing file at `p_path`.
standardize: :obj:`bool`
If ``True``, standardize `z_t` by row to mean 0 and variance
:math:`\frac{1}{m}`.
mean_impute: :obj:`bool`
If ``True``, mean-impute missing values of `z_t` by row.
Returns
-------
model: :class:`.LinearMixedModel`
Linear mixed model ready to be fit.
p: :class:`numpy.ndarray` or :class:`.BlockMatrix`
Matrix :math:`P` whose rows are the eigenvectors of :math:`K`.
The type is block matrix if the model is low rank (i.e., if `z_t` is set
and :math:`n > m`).
"""
source = matrix_table_source('linear_mixed_model/y', y)
if ((z_t is None and k is None)
or (z_t is not None and k is not None)):
raise ValueError("linear_mixed_model: set exactly one of 'z_t' and 'k'")
if len(x) == 0:
raise ValueError("linear_mixed_model: 'x' must include at least one fixed effect")
_warn_if_no_intercept('linear_mixed_model', x)
# collect x and y in one pass
mt = source.select_cols(xy=hl.array(x + [y])).key_cols_by()
xy = np.array(mt.xy.collect(), dtype=np.float64)
xy = xy.reshape(xy.size // (len(x) + 1), len(x) + 1)
x_nd = np.copy(xy[:, :-1])
y_nd = np.copy(xy[:, -1])
n = y_nd.size
del xy
if not np.all(np.isfinite(y_nd)):
raise ValueError("linear_mixed_model: 'y' has missing, nan, or infinite values")
if not np.all(np.isfinite(x_nd)):
raise ValueError("linear_mixed_model: 'x' has missing, nan, or infinite values")
if z_t is None:
model, p = LinearMixedModel.from_kinship(y_nd, x_nd, k, p_path, overwrite)
else:
check_entry_indexed('from_matrix_table: z_t', z_t)
if matrix_table_source('linear_mixed_model/z_t', z_t) != source:
raise ValueError("linear_mixed_model: 'y' and 'z_t' must "
"have the same source")
z_bm = BlockMatrix.from_entry_expr(z_t,
mean_impute=mean_impute,
center=standardize,
normalize=standardize).T # variance is 1 / n
m = z_bm.shape[1]
model, p = LinearMixedModel.from_random_effects(y_nd, x_nd, z_bm, p_path, overwrite)
if standardize:
model.s = model.s * (n / m) # now variance is 1 / m
if model.low_rank and isinstance(p, np.ndarray):
assert n > m
p = BlockMatrix.read(p_path)
return model, p
@typecheck(entry_expr=expr_float64,
model=LinearMixedModel,
pa_t_path=nullable(str),
a_t_path=nullable(str),
mean_impute=bool,
partition_size=nullable(int),
pass_through=sequenceof(oneof(str, Expression)))
def linear_mixed_regression_rows(entry_expr,
model,
pa_t_path=None,
a_t_path=None,
mean_impute=True,
partition_size=None,
pass_through=()):
"""For each row, test an input variable for association using a linear
mixed model.
Examples
--------
See the example in :meth:`linear_mixed_model` and section below on
efficiently testing multiple responses or sets of fixed effects.
Notes
-----
See :class:`.LinearMixedModel` for details on the model and notation.
This method packages up several steps for convenience:
1. Read the transformation :math:`P` from ``model.p_path``.
2. Write `entry_expr` at `a_t_path` as the block matrix :math:`A^T` with
block size equal to that of :math:`P`. The parallelism is ``n_rows / block_size``.
3. Multiply and write :math:`A^T P^T` at `pa_t_path`. The parallelism is the
number of blocks in :math:`(PA)^T`, which equals
``(n_rows / block_size) * (model.r / block_size)``.
4. Compute regression results per row with
:meth:`.LinearMixedModel.fit_alternatives`.
The parallelism is ``n_rows / partition_size``.
If `pa_t_path` and `a_t_path` are not set, temporary files are used.
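Roughly, steps 1-3 correspond to the following :class:`.BlockMatrix`
operations (a sketch only; the paths and entry expression below are
placeholders mirroring what this method does internally):
>>> p = BlockMatrix.read(model.p_path)  # doctest: +SKIP
>>> BlockMatrix.write_from_entry_expr(dataset.GT.n_alt_alleles(),  # doctest: +SKIP
...                                   'output/a_t.bm',
...                                   mean_impute=True,
...                                   block_size=p.block_size)
>>> a_t = BlockMatrix.read('output/a_t.bm')  # doctest: +SKIP
>>> (a_t @ p.T).write('output/pa_t.bm', force_row_major=True)  # doctest: +SKIP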
`entry_expr` may only have missing values if `mean_impute` is true, in
which case missing values are set to the row mean. We recommend setting
`mean_impute` to false if you expect no missing values, both for performance
and as a sanity check.
**Efficiently varying the response or set of fixed effects**
Computing :math:`K`, :math:`P`, :math:`S`, :math:`A^T`, and especially the
product :math:`(PA)^T` may require significant compute when :math:`n` and/or
:math:`m` is large. However these quantities are all independent of the
response :math:`y` or fixed effects :math:`X`! And with the model
diagonalized, Step 4 above is fast and scalable.
So having run linear mixed regression once, we can
compute :math:`h^2` and regression statistics for another response or set of
fixed effects on the **same samples** at roughly the speed of
:func:`.linear_regression_rows`.
For example, having collected another `y` and `x` as ndarrays, one can
construct a new linear mixed model directly.
Supposing the model is full-rank and `p` is an ndarray:
>>> model = hl.stats.LinearMixedModel(p @ y, p @ x, s) # doctest: +SKIP
>>> model.fit() # doctest: +SKIP
>>> result_ht = model.fit_alternatives(pa_t_path) # doctest: +SKIP
Supposing the model is low-rank and `p` is a block matrix:
>>> p = BlockMatrix.read(p_path) # doctest: +SKIP
>>> py, px = (p @ y).to_numpy(), (p @ x).to_numpy() # doctest: +SKIP
>>> model = LinearMixedModel(py, px, s, y, x) # doctest: +SKIP
>>> model.fit() # doctest: +SKIP
>>> result_ht = model.fit_alternatives(pa_t_path, a_t_path) # doctest: +SKIP
In either case, one can easily loop through many responses or conditional
analyses. To join results back to the matrix table:
>>> dataset = dataset.add_row_index() # doctest: +SKIP
>>> dataset = dataset.annotate_rows(lmmreg=result_ht[dataset.row_idx]) # doctest: +SKIP
Warning
-------
For correct results, the column-index of `entry_expr` must correspond to the
sample index of the model. This will be true, for example, if `model`
was created with :func:`.linear_mixed_model` using (a possibly row-filtered
version of) the source of `entry_expr`, or if `y` and `x` were collected to
arrays from this source. Hail will raise an error if the number of columns
does not match ``model.n``, but will not detect, for example, permuted
samples.
The warning on :meth:`.BlockMatrix.write_from_entry_expr` applies to this
method when the number of samples is large.
Note
----
Use the `pass_through` parameter to include additional row fields from
the matrix table underlying ``entry_expr``. For example, to include an "rsid"
field, set ``pass_through=['rsid']`` or ``pass_through=[mt.rsid]``.
Parameters
----------
entry_expr: :class:`.Float64Expression`
Entry-indexed expression for input variable.
If mean_impute is false, must have no missing values.
model: :class:`.LinearMixedModel`
Fit linear mixed model with `p_path` set.
pa_t_path: :class:`str`, optional
Path at which to store the transpose of :math:`PA`.
If not set, a temporary file is used.
a_t_path: :class:`str`, optional
Path at which to store the transpose of :math:`A`.
If not set, a temporary file is used.
mean_impute: :obj:`bool`
Mean-impute missing values of `entry_expr` by row.
partition_size: :obj:`int`
Number of rows to process per partition.
Default given by block size of :math:`P`.
pass_through : :obj:`list` of :class:`str` or :class:`.Expression`
Additional row fields to include in the resulting table.
Returns
-------
:class:`.Table`
"""
mt = matrix_table_source('linear_mixed_regression_rows', entry_expr)
n = mt.count_cols()
check_entry_indexed('linear_mixed_regression_rows', entry_expr)
if not model._fitted:
raise ValueError("linear_mixed_regression_rows: 'model' has not been fit "
"using 'fit()'")
if model.p_path is None:
raise ValueError("linear_mixed_regression_rows: 'model' property 'p_path' "
"was not set at initialization")
if model.n != n:
raise ValueError(f"linear_mixed_regression_rows: linear mixed model expects {model.n} samples, "
f"\n but 'entry_expr' source has {n} columns.")
pa_t_path = new_temp_file() if pa_t_path is None else pa_t_path
a_t_path = new_temp_file() if a_t_path is None else a_t_path
p = BlockMatrix.read(model.p_path)
BlockMatrix.write_from_entry_expr(entry_expr,
a_t_path,
mean_impute=mean_impute,
block_size=p.block_size)
a_t = BlockMatrix.read(a_t_path)
(a_t @ p.T).write(pa_t_path, force_row_major=True)
ht = model.fit_alternatives(pa_t_path,
a_t_path if model.low_rank else None,
partition_size)
row_fields = _get_regression_row_fields(mt, pass_through, 'linear_mixed_regression_rows')
mt_keys = mt.select_rows(**row_fields).add_row_index('__row_idx').rows().add_index('__row_idx').key_by('__row_idx')
return mt_keys.annotate(**ht[mt_keys['__row_idx']]).key_by(*mt.row_key).drop('__row_idx')
@typecheck(key_expr=expr_any,
weight_expr=expr_float64,
y=expr_float64,
x=expr_float64,
covariates=sequenceof(expr_float64),
logistic=bool,
max_size=int,
accuracy=numeric,
iterations=int)
def skat(key_expr, weight_expr, y, x, covariates, logistic=False,
max_size=46340, accuracy=1e-6, iterations=10000) -> Table:
r"""Test each keyed group of rows for association by linear or logistic
SKAT test.
Examples
--------
Test each gene for association using the linear sequence kernel association
test:
>>> skat_table = hl.skat(key_expr=burden_ds.gene,
... weight_expr=burden_ds.weight,
... y=burden_ds.burden.pheno,
... x=burden_ds.GT.n_alt_alleles(),
... covariates=[1, burden_ds.burden.cov1, burden_ds.burden.cov2])
.. caution::
By default, the Davies algorithm iterates up to 10k times until an
accuracy of 1e-6 is achieved. Hence a reported p-value of zero with no
issues may truly be as large as 1e-6. The accuracy and maximum number of
iterations may be controlled by the corresponding function parameters.
In general, higher accuracy requires more iterations.
.. caution::
To process a group with :math:`m` rows, several copies of an
:math:`m \times m` matrix of doubles must fit in worker memory. Groups
with tens of thousands of rows may exhaust worker memory causing the
entire job to fail. In this case, use the `max_size` parameter to skip
groups larger than `max_size`.
Warning
-------
:func:`.skat` considers the same set of columns (i.e., samples, points) for
every group, namely those columns for which **all** covariates are defined.
For each row, missing values of `x` are mean-imputed over these columns.
As in the example, the intercept covariate ``1`` must be included
**explicitly** if desired.
Notes
-----
This method provides a scalable implementation of the score-based
variance-component test originally described in
`Rare-Variant Association Testing for Sequencing Data with the Sequence Kernel Association Test
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3135811/>`__.
Row weights must be non-negative. Rows with missing weights are ignored. In
the R package ``skat``---which assumes rows are variants---default weights
are given by evaluating the Beta(1, 25) density at the minor allele
frequency. To replicate these weights in Hail using alternate allele
frequencies stored in a row-indexed field `AF`, one can use the expression:
>>> hl.dbeta(hl.min(ds2.AF), 1.0, 25.0) ** 2
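For example, assuming `ds2` carries such a row-indexed `AF` field
(hypothetical here), these weights could be materialized as a row field and
passed as `weight_expr`:
>>> ds2 = ds2.annotate_rows(weight=hl.dbeta(hl.min(ds2.AF), 1.0, 25.0) ** 2)  # doctest: +SKIP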
In the logistic case, the response `y` must either be numeric (with all
present values 0 or 1) or Boolean, in which case true and false are coded
as 1 and 0, respectively.
The resulting :class:`.Table` provides the group's key (`id`), the number of
rows in the group (`size`), the variance component score `q_stat`, the SKAT
`p-value`, and a `fault` flag. For the toy example above, the table has the
form:
+-------+------+--------+---------+-------+
| id    | size | q_stat | p_value | fault |
+=======+======+========+=========+=======+
| geneA |    2 |  4.136 |   0.205 |     0 |
+-------+------+--------+---------+-------+
| geneB |    1 |  5.659 |   0.195 |     0 |
+-------+------+--------+---------+-------+
| geneC |    3 |  4.122 |   0.192 |     0 |
+-------+------+--------+---------+-------+
Groups larger than `max_size` appear with missing `q_stat`, `p_value`, and
`fault`. The hard limit on the number of rows in a group is 46340.
Note that the variance component score `q_stat` agrees with ``Q`` in the R
package ``skat``, but both differ from :math:`Q` in the paper by the factor
:math:`\frac{1}{2\sigma^2}` in the linear case and :math:`\frac{1}{2}` in
the logistic case, where :math:`\sigma^2` is the unbiased estimator of
residual variance for the linear null model. The R package also applies a
"small-sample adjustment" to the null distribution in the logistic case
when the sample size is less than 2000. Hail does not apply this
adjustment.
The fault flag is an integer indicating whether any issues occurred when
running the Davies algorithm to compute the p-value as the right tail of a
weighted sum of :math:`\chi^2(1)` distributions.
+-------------+-----------------------------------------+
| fault value | Description                             |
+=============+=========================================+
| 0           | no issues                               |
+-------------+-----------------------------------------+
| 1           | accuracy NOT achieved                   |
+-------------+-----------------------------------------+
| 2           | round-off error possibly significant    |
+-------------+-----------------------------------------+
| 3           | invalid parameters                      |
+-------------+-----------------------------------------+
| 4           | unable to locate integration parameters |
+-------------+-----------------------------------------+
| 5           | out of memory                           |
+-------------+-----------------------------------------+
Parameters
----------
key_expr : :class:`.Expression`
Row-indexed expression for key associated to each row.
weight_expr : :class:`.Float64Expression`
Row-indexed expression for row weights.
y : :class:`.Float64Expression`
Column-indexed response expression.
If `logistic` is ``True``, all non-missing values must evaluate to 0 or
1. Note that a :class:`.BooleanExpression` will be implicitly converted
to a :class:`.Float64Expression` with this property.
x : :class:`.Float64Expression`
Entry-indexed expression for input variable.
covariates : :obj:`list` of :class:`.Float64Expression`
List of column-indexed covariate expressions.
logistic : :obj:`bool`
If true, use the logistic test rather than the linear test.
max_size : :obj:`int`
Maximum size of group on which to run the test.
accuracy : :obj:`float`
Accuracy achieved by the Davies algorithm if fault value is zero.
iterations : :obj:`int`
Maximum number of iterations attempted by the Davies algorithm.
Returns
-------
:class:`.Table`
Table of SKAT results.
"""
mt = matrix_table_source('skat/x', x)
check_entry_indexed('skat/x', x)
analyze('skat/key_expr', key_expr, mt._row_indices)
analyze('skat/weight_expr', weight_expr, mt._row_indices)
analyze('skat/y', y, mt._col_indices)
all_exprs = [key_expr, weight_expr, y]
for e in covariates:
all_exprs.append(e)
analyze('skat/covariates', e, mt._col_indices)
_warn_if_no_intercept('skat', covariates)
# FIXME: remove this logic when annotation is better optimized
if x in mt._fields_inverse:
x_field_name = mt._fields_inverse[x]
entry_expr = {}
else:
x_field_name = Env.get_uid()
entry_expr = {x_field_name: x}
y_field_name = '__y'
weight_field_name = '__weight'
key_field_name = '__key'
cov_field_names = list(f'__cov{i}' for i in range(len(covariates)))
mt = mt._select_all(col_exprs=dict(**{y_field_name: y},
**dict(zip(cov_field_names, covariates))),
row_exprs={weight_field_name: weight_expr,
key_field_name: key_expr},
entry_exprs=entry_expr)
config = {
'name': 'Skat',
'keyField': key_field_name,
'weightField': weight_field_name,
'xField': x_field_name,
'yField': y_field_name,
'covFields': cov_field_names,
'logistic': logistic,
'maxSize': max_size,
'accuracy': accuracy,
'iterations': iterations
}
return Table(ir.MatrixToTableApply(mt._mir, config))
@typecheck(p_value=expr_numeric,
approximate=bool)
def lambda_gc(p_value, approximate=True):
"""
Compute genomic inflation factor (lambda GC) from an Expression of p-values.
.. include:: ../_templates/experimental.rst
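For example, given a row-indexed field of p-values on a results table
(``gwas_ht`` and its `p_value` field are hypothetical names):
>>> lgc = hl.lambda_gc(gwas_ht.p_value)  # doctest: +SKIP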
Parameters
----------
p_value : :class:`.NumericExpression`
Row-indexed numeric expression of p-values.
approximate : :obj:`bool`
If False, computes exact lambda GC (slower and uses more memory).
Returns
-------
:obj:`float`
Genomic inflation factor (lambda genomic control).
"""
check_row_indexed('lambda_gc', p_value)
t = table_source('lambda_gc', p_value)
med_chisq = _lambda_gc_agg(p_value, approximate)
return t.aggregate(med_chisq)
@typecheck(p_value=expr_numeric,
approximate=bool)
def _lambda_gc_agg(p_value, approximate=True):
chisq = hl.qchisqtail(p_value, 1)
if approximate:
med_chisq = hl.agg.filter(~hl.is_nan(p_value), hl.agg.approx_quantiles(chisq, 0.5))
else:
med_chisq = hl.agg.filter(~hl.is_nan(p_value), hl.median(hl.agg.collect(chisq)))
return med_chisq / hl.qchisqtail(0.5, 1)
@typecheck(ds=oneof(Table, MatrixTable),
keep_star=bool,
left_aligned=bool,
permit_shuffle=bool)
def split_multi(ds, keep_star=False, left_aligned=False, *, permit_shuffle=False):
"""Split multiallelic variants.
Warning
-------
In order to support a wide variety of data types, this function splits only
the variants on a :class:`.MatrixTable`, but **not the genotypes**. Use
:func:`.split_multi_hts` if possible, or split the genotypes yourself using
one of the entry modification methods: :meth:`.MatrixTable.annotate_entries`,
:meth:`.MatrixTable.select_entries`, :meth:`.MatrixTable.transmute_entries`.
The resulting dataset will be keyed by the split locus and alleles.
:func:`.split_multi` adds the following fields:
- `was_split` (*bool*) -- ``True`` if this variant was originally
multiallelic, otherwise ``False``.
- `a_index` (*int*) -- The original index of this alternate allele in the
multiallelic representation (NB: 1 is the first alternate allele or the
only alternate allele in a biallelic variant). For example, 1:100:A:T,C
splits into two variants: 1:100:A:T with ``a_index = 1`` and 1:100:A:C
with ``a_index = 2``.
- `old_locus` (*locus*) -- The original, unsplit locus.
- `old_alleles` (*array<str>*) -- The original, unsplit alleles.
All other fields are left unchanged.
Warning
-------
This method assumes `ds` contains one non-split variant per locus. This assumption permits the
most efficient implementation of this method.
If each locus in `ds` contains one multiallelic variant and one or more biallelic variants, you
can filter to the multiallelic variants, split those, and then combine the split variants with
the original biallelic variants.
For example, the following code splits a dataset `mt` which contains a mixture of split and
non-split variants.
>>> bi = mt.filter_rows(hl.len(mt.alleles) == 2)
>>> bi = bi.annotate_rows(was_split=False)
>>> multi = mt.filter_rows(hl.len(mt.alleles) > 2)
>>> split = hl.split_multi_hts(multi)
>>> mt = split.union_rows(bi)
Example
-------
:func:`.split_multi_hts`, which splits multiallelic variants for the HTS
genotype schema and updates the entry fields by downcoding the genotype, is
implemented as:
>>> sm = hl.split_multi(ds)
>>> pl = hl.or_missing(
... hl.is_defined(sm.PL),
... (hl.range(0, 3).map(lambda i: hl.min(hl.range(0, hl.len(sm.PL))
... .filter(lambda j: hl.downcode(hl.unphased_diploid_gt_index_call(j), sm.a_index) == hl.unphased_diploid_gt_index_call(i))
... .map(lambda j: sm.PL[j])))))
>>> split_ds = sm.annotate_entries(
... GT=hl.downcode(sm.GT, sm.a_index),
... AD=hl.or_missing(hl.is_defined(sm.AD),
... [hl.sum(sm.AD) - sm.AD[sm.a_index], sm.AD[sm.a_index]]),
... DP=sm.DP,
... PL=pl,
... GQ=hl.gq_from_pl(pl)).drop('old_locus', 'old_alleles')
See Also
--------
:func:`.split_multi_hts`
Parameters
----------
ds : :class:`.MatrixTable` or :class:`.Table`
An unsplit dataset.
keep_star : :obj:`bool`
Do not filter out * alleles.
left_aligned : :obj:`bool`
If ``True``, variants are assumed to be left aligned and have unique
loci. This avoids a shuffle. If the assumption is violated, an error
is generated.
permit_shuffle : :obj:`bool`
If ``True``, permit a data shuffle to sort out-of-order split results.
This will only be required if input data has duplicate loci, one of
which contains more than one alternate allele.
Returns
-------
:class:`.MatrixTable` or :class:`.Table`
"""
require_row_key_variant(ds, "split_multi")
new_id = Env.get_uid()
is_table = isinstance(ds, Table)
old_row = ds.row if is_table else ds._rvrow
kept_alleles = hl.range(1, hl.len(old_row.alleles))
if not keep_star:
kept_alleles = kept_alleles.filter(lambda i: old_row.alleles[i] != "*")
def new_struct(variant, i):
return hl.struct(alleles=variant.alleles,
locus=variant.locus,
a_index=i,
was_split=hl.len(old_row.alleles) > 2)
def split_rows(expr, rekey):
if isinstance(ds, MatrixTable):
mt = (ds.annotate_rows(**{new_id: expr})
.explode_rows(new_id))
if rekey:
mt = mt.key_rows_by()
else:
mt = mt.key_rows_by('locus')
new_row_expr = mt._rvrow.annotate(locus=mt[new_id]['locus'],
alleles=mt[new_id]['alleles'],
a_index=mt[new_id]['a_index'],
was_split=mt[new_id]['was_split'],
old_locus=mt.locus,
old_alleles=mt.alleles).drop(new_id)
mt = mt._select_rows('split_multi', new_row_expr)
if rekey:
return mt.key_rows_by('locus', 'alleles')
else:
return MatrixTable(ir.MatrixKeyRowsBy(mt._mir, ['locus', 'alleles'], is_sorted=True))
else:
assert isinstance(ds, Table)
ht = (ds.annotate(**{new_id: expr})
.explode(new_id))
if rekey:
ht = ht.key_by()
else:
ht = ht.key_by('locus')
new_row_expr = ht.row.annotate(locus=ht[new_id]['locus'],
alleles=ht[new_id]['alleles'],
a_index=ht[new_id]['a_index'],
was_split=ht[new_id]['was_split'],
old_locus=ht.locus,
old_alleles=ht.alleles).drop(new_id)
ht = ht._select('split_multi', new_row_expr)
if rekey:
return ht.key_by('locus', 'alleles')
else:
return Table(ir.TableKeyBy(ht._tir, ['locus', 'alleles'], is_sorted=True))
if left_aligned:
def make_struct(i):
def error_on_moved(v):
return (hl.case()
.when(v.locus == old_row.locus, new_struct(v, i))
.or_error("Found non-left-aligned variant in split_multi"))
return hl.bind(error_on_moved,
hl.min_rep(old_row.locus, [old_row.alleles[0], old_row.alleles[i]]))
return split_rows(hl.sorted(kept_alleles.map(make_struct)), permit_shuffle)
else:
def make_struct(i, cond):
def struct_or_empty(v):
return (hl.case()
.when(cond(v.locus), hl.array([new_struct(v, i)]))
.or_missing())
return hl.bind(struct_or_empty,
hl.min_rep(old_row.locus, [old_row.alleles[0], old_row.alleles[i]]))
def make_array(cond):
return hl.sorted(kept_alleles.flatmap(lambda i: make_struct(i, cond)))
left = split_rows(make_array(lambda locus: locus == ds['locus']), permit_shuffle)
moved = split_rows(make_array(lambda locus: locus != ds['locus']), True)
return left.union(moved) if is_table else left.union_rows(moved, _check_cols=False)
@typecheck(ds=oneof(Table, MatrixTable),
keep_star=bool,
left_aligned=bool,
vep_root=str,
permit_shuffle=bool)
def split_multi_hts(ds, keep_star=False, left_aligned=False, vep_root='vep', *, permit_shuffle=False):
"""Split multiallelic variants for datasets that contain one or more fields
from a standard high-throughput sequencing entry schema.
.. code-block:: text
struct {
GT: call,
AD: array<int32>,
DP: int32,
GQ: int32,
PL: array<int32>,
PGT: call,
PID: str
}
For other entry fields, write your own splitting logic using
:meth:`.MatrixTable.annotate_entries`.
Examples
--------
>>> hl.split_multi_hts(dataset).write('output/split.mt')
Warning
-------
This method assumes `ds` contains one non-split variant per locus. This assumption permits the
most efficient implementation of this method.
If each locus in `ds` contains one multiallelic variant and one or more biallelic variants, you
can filter to the multiallelic variants, split those, and then combine the split variants with
the original biallelic variants.
For example, the following code splits a dataset `mt` which contains a mixture of split and
non-split variants.
>>> bi = mt.filter_rows(hl.len(mt.alleles) == 2)
>>> bi = bi.annotate_rows(was_split=False)
>>> multi = mt.filter_rows(hl.len(mt.alleles) > 2)
>>> split = hl.split_multi_hts(multi)
>>> mt = split.union_rows(bi)
Notes
-----
We will explain by example. Consider a hypothetical 3-allelic
variant:
.. code-block:: text
A C,T 0/2:7,2,6:15:45:99,50,99,0,45,99
:func:`.split_multi_hts` will create two biallelic variants (one for each
alternate allele) at the same position
.. code-block:: text
A C 0/0:13,2:15:45:0,45,99
A T 0/1:9,6:15:50:50,0,99
Each multiallelic `GT` or `PGT` field is downcoded once for each alternate allele. A
call for an alternate allele maps to 1 in the biallelic variant
corresponding to itself and 0 otherwise. For example, in the example above,
0/2 maps to 0/0 and 0/1. The genotype 1/2 maps to 0/1 and 0/1.
The biallelic alt `AD` entry is just the multiallelic `AD` entry
corresponding to the alternate allele. The ref AD entry is the sum of the
other multiallelic entries.
The biallelic `DP` is the same as the multiallelic `DP`.
The biallelic `PL` entry for a genotype g is the minimum over `PL` entries
for multiallelic genotypes that downcode to g. For example, the `PL` for (A,
T) at 0/1 is the minimum of the PLs for 0/1 (50) and 1/2 (45), and thus 45.
Fixing an alternate allele and biallelic variant, downcoding gives a map
from multiallelic to biallelic alleles and genotypes. The biallelic `AD` entry
for an allele is just the sum of the multiallelic `AD` entries for alleles
that map to that allele. Similarly, the biallelic `PL` entry for a genotype is
the minimum over multiallelic `PL` entries for genotypes that map to that
genotype.
`GQ` is recomputed from `PL` if `PL` is provided and is not
missing. If not, it is copied from the original GQ.
Here is a second example for a het non-ref
.. code-block:: text
A C,T 1/2:2,8,6:16:45:99,50,99,45,0,99
splits as
.. code-block:: text
A C 0/1:8,8:16:45:45,0,99
A T 0/1:10,6:16:50:50,0,99
**VCF Info Fields**
Hail does not split fields in the info field. This means that if a
multiallelic site with `info.AC` value ``[10, 2]`` is split, each split
site will contain the same array ``[10, 2]``. The provided allele index
field `a_index` can be used to select the value corresponding to the split
allele's position:
>>> split_ds = hl.split_multi_hts(dataset)
>>> split_ds = split_ds.filter_rows(split_ds.info.AC[split_ds.a_index - 1] < 10,
... keep = False)
VCFs split by Hail and exported to new VCFs may be
incompatible with other tools, if action is not taken
first. Since the "Number" of the arrays in split multiallelic
sites no longer matches the structure on import ("A" for 1 per
allele, for example), Hail will export these fields with
number ".".
If the desired output is one value per site, then it is
possible to use :meth:`.MatrixTable.annotate_rows` to remap these
values. Here is an example:
>>> split_ds = hl.split_multi_hts(dataset)
>>> split_ds = split_ds.annotate_rows(info = split_ds.info.annotate(AC = split_ds.info.AC[split_ds.a_index - 1]))
>>> hl.export_vcf(split_ds, 'output/export.vcf') # doctest: +SKIP
The info field AC in *output/export.vcf* will have ``Number=1``.
**New Fields**
:func:`.split_multi_hts` adds the following fields:
- `was_split` (*bool*) -- ``True`` if this variant was originally
multiallelic, otherwise ``False``.
- `a_index` (*int*) -- The original index of this alternate allele in the
multiallelic representation (NB: 1 is the first alternate allele or the
only alternate allele in a biallelic variant). For example, 1:100:A:T,C
splits into two variants: 1:100:A:T with ``a_index = 1`` and 1:100:A:C
with ``a_index = 2``.
See Also
--------
:func:`.split_multi`
Parameters
----------
ds : :class:`.MatrixTable` or :class:`.Table`
An unsplit dataset.
keep_star : :obj:`bool`
Do not filter out * alleles.
left_aligned : :obj:`bool`
If ``True``, variants are assumed to be left
aligned and have unique loci. This avoids a shuffle. If the assumption
is violated, an error is generated.
vep_root : :class:`str`
Top-level location of vep data. All variable-length VEP fields
(intergenic_consequences, motif_feature_consequences,
regulatory_feature_consequences, and transcript_consequences)
will be split properly (i.e. a_index corresponding to the VEP allele_num).
permit_shuffle : :obj:`bool`
If ``True``, permit a data shuffle to sort out-of-order split results.
This will only be required if input data has duplicate loci, one of
which contains more than one alternate allele.
Returns
-------
:class:`.MatrixTable` or :class:`.Table`
A biallelic variant dataset.
"""
split = split_multi(ds, keep_star=keep_star, left_aligned=left_aligned, permit_shuffle=permit_shuffle)
row_fields = set(ds.row)
update_rows_expression = {}
if vep_root in row_fields:
update_rows_expression[vep_root] = split[vep_root].annotate(**{
x: split[vep_root][x].filter(lambda csq: csq.allele_num == split.a_index)
for x in ('intergenic_consequences', 'motif_feature_consequences',
'regulatory_feature_consequences', 'transcript_consequences')})
if isinstance(ds, Table):
return split.annotate(**update_rows_expression).drop('old_locus', 'old_alleles')
split = split.annotate_rows(**update_rows_expression)
entry_fields = ds.entry
expected_field_types = {
'GT': hl.tcall,
'AD': hl.tarray(hl.tint),
'DP': hl.tint,
'GQ': hl.tint,
'PL': hl.tarray(hl.tint),
'PGT': hl.tcall,
'PID': hl.tstr
}
bad_fields = []
for field in entry_fields:
if field in expected_field_types and entry_fields[field].dtype != expected_field_types[field]:
bad_fields.append((field, entry_fields[field].dtype, expected_field_types[field]))
if bad_fields:
msg = '\n '.join([f"'{x[0]}'\tfound: {x[1]}\texpected: {x[2]}" for x in bad_fields])
raise TypeError("'split_multi_hts': Found invalid types for the following fields:\n " + msg)
update_entries_expression = {}
if 'GT' in entry_fields:
update_entries_expression['GT'] = hl.downcode(split.GT, split.a_index)
if 'DP' in entry_fields:
update_entries_expression['DP'] = split.DP
if 'AD' in entry_fields:
update_entries_expression['AD'] = hl.or_missing(hl.is_defined(split.AD),
[hl.sum(split.AD) - split.AD[split.a_index], split.AD[split.a_index]])
if 'PL' in entry_fields:
pl = hl.or_missing(
hl.is_defined(split.PL),
(hl.range(0, 3).map(lambda i:
hl.min((hl.range(0, hl.triangle(split.old_alleles.length()))
.filter(lambda j: hl.downcode(hl.unphased_diploid_gt_index_call(j),
split.a_index).unphased_diploid_gt_index() == i
).map(lambda j: split.PL[j]))))))
if 'GQ' in entry_fields:
update_entries_expression['PL'] = pl
update_entries_expression['GQ'] = hl.or_else(hl.gq_from_pl(pl), split.GQ)
else:
update_entries_expression['PL'] = pl
else:
if 'GQ' in entry_fields:
update_entries_expression['GQ'] = split.GQ
if 'PGT' in entry_fields:
update_entries_expression['PGT'] = hl.downcode(split.PGT, split.a_index)
if 'PID' in entry_fields:
update_entries_expression['PID'] = split.PID
return split.annotate_entries(**update_entries_expression).drop('old_locus', 'old_alleles')
@typecheck(call_expr=expr_call)
def genetic_relatedness_matrix(call_expr) -> BlockMatrix:
r"""Compute the genetic relatedness matrix (GRM).
Examples
--------
>>> grm = hl.genetic_relatedness_matrix(dataset.GT)
Notes
-----
The genetic relationship matrix (GRM) :math:`G` encodes genetic correlation
between each pair of samples. It is defined by :math:`G = MM^T` where
:math:`M` is a standardized version of the genotype matrix, computed as
follows. Let :math:`C` be the :math:`n \times m` matrix of raw genotypes
in the variant dataset, with rows indexed by :math:`n` samples and columns
indexed by :math:`m` bialellic autosomal variants; :math:`C_{ij}` is the
number of alternate alleles of variant :math:`j` carried by sample
:math:`i`, which can be 0, 1, 2, or missing. For each variant :math:`j`,
the sample alternate allele frequency :math:`p_j` is computed as half the
mean of the non-missing entries of column :math:`j`. Entries of :math:`M`
are then mean-centered and variance-normalized as
.. math::
M_{ij} = \frac{C_{ij}-2p_j}{\sqrt{2p_j(1-p_j)m}},
with :math:`M_{ij} = 0` for :math:`C_{ij}` missing (i.e. mean genotype
imputation). This scaling normalizes genotype variances to a common value
:math:`1/m` for variants in Hardy-Weinberg equilibrium and is further
motivated in the paper `Patterson, Price and Reich, 2006
<http://journals.plos.org/plosgenetics/article?id=10.1371/journal.pgen.0020190>`__.
(The resulting amplification of signal from the low end of the allele
frequency spectrum will also introduce noise for rare variants; common
practice is to filter out variants with minor allele frequency below some
cutoff.) The factor :math:`1/m` gives each sample row approximately unit
total variance (assuming linkage equilibrium) so that the diagonal entries
of the GRM are approximately 1. Equivalently,
.. math::
G_{ik} = \frac{1}{m} \sum_{j=1}^m \frac{(C_{ij}-2p_j)(C_{kj}-2p_j)}{2 p_j (1-p_j)}
This method drops variants with :math:`p_j = 0` or :math:`p_j = 1` before
computing kinship.
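As an illustration of the construction of :math:`M` and :math:`G` above,
here is a plain NumPy sketch on a made-up genotype matrix ``C`` (rows are
samples, columns are variants); this is not Hail's implementation:
>>> import numpy as np  # doctest: +SKIP
>>> C = np.array([[0., 1., 2.], [1., 1., 0.], [2., 0., 1.]])  # doctest: +SKIP
>>> p = C.mean(axis=0) / 2  # sample alternate allele frequencies  # doctest: +SKIP
>>> M = (C - 2 * p) / np.sqrt(2 * p * (1 - p) * C.shape[1])  # doctest: +SKIP
>>> G = M @ M.T  # genetic relatedness matrix  # doctest: +SKIP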
Parameters
----------
call_expr : :class:`.CallExpression`
Entry-indexed call expression with columns corresponding
to samples.
Returns
-------
:class:`.BlockMatrix`
Genetic relatedness matrix for all samples. Row and column indices
correspond to matrix table column index.
"""
mt = matrix_table_source('genetic_relatedness_matrix/call_expr', call_expr)
check_entry_indexed('genetic_relatedness_matrix/call_expr', call_expr)
mt = mt.select_entries(__gt=call_expr.n_alt_alleles()).unfilter_entries()
mt = mt.select_rows(__AC=agg.sum(mt.__gt),
__n_called=agg.count_where(hl.is_defined(mt.__gt)))
mt = mt.filter_rows((mt.__AC > 0) & (mt.__AC < 2 * mt.__n_called))
mt = mt.select_rows(__mean_gt=mt.__AC / mt.__n_called)
mt = mt.annotate_rows(__hwe_scaled_std_dev=hl.sqrt(mt.__mean_gt * (2 - mt.__mean_gt)))
normalized_gt = hl.or_else((mt.__gt - mt.__mean_gt) / mt.__hwe_scaled_std_dev, 0.0)
bm = BlockMatrix.from_entry_expr(normalized_gt)
return (bm.T @ bm) / (bm.n_rows / 2.0)
@typecheck(call_expr=expr_call)
def realized_relationship_matrix(call_expr) -> BlockMatrix:
r"""Computes the realized relationship matrix (RRM).
Examples
--------
>>> rrm = hl.realized_relationship_matrix(dataset.GT)
Notes
-----
The realized relationship matrix (RRM) is defined as follows. Consider the
:math:`n \times m` matrix :math:`C` of raw genotypes, with rows indexed by
:math:`n` samples and columns indexed by the :math:`m` bialellic autosomal
variants; :math:`C_{ij}` is the number of alternate alleles of variant
:math:`j` carried by sample :math:`i`, which can be 0, 1, 2, or missing. For
each variant :math:`j`, the sample alternate allele frequency :math:`p_j` is
computed as half the mean of the non-missing entries of column :math:`j`.
Entries of :math:`M` are then mean-centered and variance-normalized as
.. math::
M_{ij} =
\frac{C_{ij}-2p_j}
{\sqrt{\frac{m}{n} \sum_{k=1}^n (C_{ij}-2p_j)^2}},
with :math:`M_{ij} = 0` for :math:`C_{ij}` missing (i.e. mean genotype
imputation). This scaling normalizes each variant column to have empirical
variance :math:`1/m`, which gives each sample row approximately unit total
variance (assuming linkage equilibrium) and yields the :math:`n \times n`
sample correlation or realized relationship matrix (RRM) :math:`K` as simply
.. math::
K = MM^T
Note that the only difference between the realized relationship matrix and
the genetic relatedness matrix (GRM) used in
:func:`.genetic_relatedness_matrix` is the variant (column) normalization:
where RRM uses empirical variance, GRM uses expected variance under
Hardy-Weinberg Equilibrium.
This method drops variants with zero variance before computing kinship.
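To contrast with the GRM, the empirical (rather than Hardy-Weinberg)
normalization can be illustrated in plain NumPy on a made-up genotype
matrix ``C`` (rows are samples, columns are variants); again this is a
sketch, not Hail's implementation:
>>> import numpy as np  # doctest: +SKIP
>>> C = np.array([[0., 1., 2.], [1., 1., 0.], [2., 0., 1.]])  # doctest: +SKIP
>>> n, m = C.shape  # doctest: +SKIP
>>> p = C.mean(axis=0) / 2  # doctest: +SKIP
>>> M = (C - 2 * p) / np.sqrt((m / n) * ((C - 2 * p) ** 2).sum(axis=0))  # doctest: +SKIP
>>> K = M @ M.T  # realized relationship matrix  # doctest: +SKIP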
Parameters
----------
call_expr : :class:`.CallExpression`
Entry-indexed call expression on matrix table with columns corresponding
to samples.
Returns
-------
:class:`.BlockMatrix`
Realized relationship matrix for all samples. Row and column indices
correspond to matrix table column index.
"""
mt = matrix_table_source('realized_relationship_matrix/call_expr', call_expr)
check_entry_indexed('realized_relationship_matrix/call_expr', call_expr)
mt = mt.select_entries(__gt=call_expr.n_alt_alleles()).unfilter_entries()
mt = mt.select_rows(__AC=agg.sum(mt.__gt),
__ACsq=agg.sum(mt.__gt * mt.__gt),
__n_called=agg.count_where(hl.is_defined(mt.__gt)))
mt = mt.select_rows(__mean_gt=mt.__AC / mt.__n_called,
__centered_length=hl.sqrt(mt.__ACsq - (mt.__AC ** 2) / mt.__n_called))
fmt = mt.filter_rows(mt.__centered_length > 0.1) # truly non-zero values are at least sqrt(0.5)
normalized_gt = hl.or_else((fmt.__gt - fmt.__mean_gt) / fmt.__centered_length, 0.0)
try:
bm = BlockMatrix.from_entry_expr(normalized_gt)
return (bm.T @ bm) / (bm.n_rows / bm.n_cols)
except FatalError as fe:
raise FatalError("Could not convert MatrixTable to BlockMatrix. It's possible all variants were dropped by variance filter.\n"
"Check that the input MatrixTable has at least two samples in it: mt.count_cols().") from fe
@typecheck(entry_expr=expr_float64, block_size=nullable(int))
def row_correlation(entry_expr, block_size=None) -> BlockMatrix:
"""Computes the correlation matrix between row vectors.
Examples
--------
Consider the following dataset with three variants and four samples:
>>> data = [{'v': '1:1:A:C', 's': 'a', 'GT': hl.Call([0, 0])},
... {'v': '1:1:A:C', 's': 'b', 'GT': hl.Call([0, 0])},
... {'v': '1:1:A:C', 's': 'c', 'GT': hl.Call([0, 1])},
... {'v': '1:1:A:C', 's': 'd', 'GT': hl.Call([1, 1])},
... {'v': '1:2:G:T', 's': 'a', 'GT': hl.Call([0, 1])},
... {'v': '1:2:G:T', 's': 'b', 'GT': hl.Call([1, 1])},
... {'v': '1:2:G:T', 's': 'c', 'GT': hl.Call([0, 1])},
... {'v': '1:2:G:T', 's': 'd', 'GT': hl.Call([0, 0])},
... {'v': '1:3:C:G', 's': 'a', 'GT': hl.Call([0, 1])},
... {'v': '1:3:C:G', 's': 'b', 'GT': hl.Call([0, 0])},
... {'v': '1:3:C:G', 's': 'c', 'GT': hl.Call([1, 1])},
... {'v': '1:3:C:G', 's': 'd', 'GT': hl.missing(hl.tcall)}]
>>> ht = hl.Table.parallelize(data, hl.dtype('struct{v: str, s: str, GT: call}'))
>>> mt = ht.to_matrix_table(row_key=['v'], col_key=['s'])
Compute genotype correlation between all pairs of variants:
>>> ld = hl.row_correlation(mt.GT.n_alt_alleles())
>>> ld.to_numpy()
array([[ 1. , -0.85280287, 0.42640143],
[-0.85280287, 1. , -0.5 ],
[ 0.42640143, -0.5 , 1. ]])
Compute genotype correlation between consecutively-indexed variants:
>>> ld.sparsify_band(lower=0, upper=1).to_numpy()
array([[ 1. , -0.85280287, 0. ],
[ 0. , 1. , -0.5 ],
[ 0. , 0. , 1. ]])
Warning
-------
Rows with a constant value (i.e., zero variance) will result `nan`
correlation values. To avoid this, first check that all rows vary or filter
out constant rows (for example, with the help of :func:`.aggregators.stats`).
Notes
-----
In this method, each row of entries is regarded as a vector with elements
defined by `entry_expr` and missing values mean-imputed per row.
The ``(i, j)`` element of the resulting block matrix is the correlation
between rows ``i`` and ``j`` (as 0-indexed by order in the matrix table;
see :meth:`~hail.MatrixTable.add_row_index`).
The correlation of two vectors is defined as the
`Pearson correlation coeffecient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`__
between the corresponding empirical distributions of elements,
or equivalently as the cosine of the angle between the vectors.
This method has two stages:
- writing the row-normalized block matrix to a temporary file on persistent
disk with :meth:`.BlockMatrix.from_entry_expr`. The parallelism is
``n_rows / block_size``.
- reading and multiplying this block matrix by its transpose. The
parallelism is ``(n_rows / block_size)^2`` if all blocks are computed.
Warning
-------
See all warnings on :meth:`.BlockMatrix.from_entry_expr`. In particular,
for large matrices, it may be preferable to run the two stages separately,
saving the row-normalized block matrix to a file on external storage with
:meth:`.BlockMatrix.write_from_entry_expr`.
The resulting number of matrix elements is the square of the number of rows
in the matrix table, so computing the full matrix may be infeasible. For
example, ten million rows would produce 800TB of float64 values. The
block-sparse representation on BlockMatrix may be used to work efficiently
with regions of such matrices, as in the second example above and
:meth:`ld_matrix`.
To prevent excessive re-computation, be sure to write and read the (possibly
block-sparsified) result before multiplication by another matrix.
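A sketch of running the two stages separately (the path below is a
placeholder):
>>> from hail.linalg import BlockMatrix  # doctest: +SKIP
>>> BlockMatrix.write_from_entry_expr(mt.GT.n_alt_alleles(),  # doctest: +SKIP
...                                   'output/normalized.bm',
...                                   mean_impute=True,
...                                   center=True,
...                                   normalize=True)
>>> bm = BlockMatrix.read('output/normalized.bm')  # doctest: +SKIP
>>> corr = bm @ bm.T  # doctest: +SKIP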
Parameters
----------
entry_expr : :class:`.Float64Expression`
Entry-indexed numeric expression on matrix table.
block_size : :obj:`int`, optional
Block size. Default given by :meth:`.BlockMatrix.default_block_size`.
Returns
-------
:class:`.BlockMatrix`
Correlation matrix between row vectors. Row and column indices
correspond to matrix table row index.
"""
bm = BlockMatrix.from_entry_expr(entry_expr, mean_impute=True, center=True, normalize=True, block_size=block_size)
return bm @ bm.T
@typecheck(entry_expr=expr_float64,
locus_expr=expr_locus(),
radius=oneof(int, float),
coord_expr=nullable(expr_float64),
block_size=nullable(int))
def ld_matrix(entry_expr, locus_expr, radius, coord_expr=None, block_size=None) -> BlockMatrix:
"""Computes the windowed correlation (linkage disequilibrium) matrix between
variants.
Examples
--------
Consider the following dataset consisting of three variants with centimorgan
coordinates and four samples:
>>> data = [{'v': '1:1:A:C', 'cm': 0.1, 's': 'a', 'GT': hl.Call([0, 0])},
... {'v': '1:1:A:C', 'cm': 0.1, 's': 'b', 'GT': hl.Call([0, 0])},
... {'v': '1:1:A:C', 'cm': 0.1, 's': 'c', 'GT': hl.Call([0, 1])},
... {'v': '1:1:A:C', 'cm': 0.1, 's': 'd', 'GT': hl.Call([1, 1])},
... {'v': '1:2000000:G:T', 'cm': 0.9, 's': 'a', 'GT': hl.Call([0, 1])},
... {'v': '1:2000000:G:T', 'cm': 0.9, 's': 'b', 'GT': hl.Call([1, 1])},
... {'v': '1:2000000:G:T', 'cm': 0.9, 's': 'c', 'GT': hl.Call([0, 1])},
... {'v': '1:2000000:G:T', 'cm': 0.9, 's': 'd', 'GT': hl.Call([0, 0])},
... {'v': '2:1:C:G', 'cm': 0.2, 's': 'a', 'GT': hl.Call([0, 1])},
... {'v': '2:1:C:G', 'cm': 0.2, 's': 'b', 'GT': hl.Call([0, 0])},
... {'v': '2:1:C:G', 'cm': 0.2, 's': 'c', 'GT': hl.Call([1, 1])},
... {'v': '2:1:C:G', 'cm': 0.2, 's': 'd', 'GT': hl.missing(hl.tcall)}]
>>> ht = hl.Table.parallelize(data, hl.dtype('struct{v: str, s: str, cm: float64, GT: call}'))
>>> ht = ht.transmute(**hl.parse_variant(ht.v))
>>> mt = ht.to_matrix_table(row_key=['locus', 'alleles'], col_key=['s'], row_fields=['cm'])
Compute linkage disequilibrium between all pairs of variants on the same
contig and within two megabases:
>>> ld = hl.ld_matrix(mt.GT.n_alt_alleles(), mt.locus, radius=2e6)
>>> ld.to_numpy()
array([[ 1. , -0.85280287, 0. ],
[-0.85280287, 1. , 0. ],
[ 0. , 0. , 1. ]])
Within one megabases:
>>> ld = hl.ld_matrix(mt.GT.n_alt_alleles(), mt.locus, radius=1e6)
>>> ld.to_numpy()
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
Within one centimorgan:
>>> ld = hl.ld_matrix(mt.GT.n_alt_alleles(), mt.locus, radius=1.0, coord_expr=mt.cm)
>>> ld.to_numpy()
array([[ 1. , -0.85280287, 0. ],
[-0.85280287, 1. , 0. ],
[ 0. , 0. , 1. ]])
Within one centimorgan, and only calculate the upper triangle:
>>> ld = hl.ld_matrix(mt.GT.n_alt_alleles(), mt.locus, radius=1.0, coord_expr=mt.cm)
>>> ld = ld.sparsify_triangle()
>>> ld.to_numpy()
array([[ 1. , -0.85280287, 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ]])
Notes
-----
This method sparsifies the result of :meth:`row_correlation` using
:func:`.linalg.utils.locus_windows` and
:meth:`.BlockMatrix.sparsify_row_intervals`
in order to only compute linkage disequilibrium between nearby
variants. Use :meth:`row_correlation` directly to calculate correlation
without windowing.
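A rough sketch of the equivalent manual pipeline, using the example matrix
table ``mt`` above (exact intermediate types may differ from what this
method does internally):
>>> starts, stops = hl.linalg.utils.locus_windows(mt.locus, radius=1e6)  # doctest: +SKIP
>>> ld = hl.row_correlation(mt.GT.n_alt_alleles())  # doctest: +SKIP
>>> ld = ld.sparsify_row_intervals(starts, stops)  # doctest: +SKIP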
More precisely, variants are 0-indexed by their order in the matrix table
(see :meth:`~hail.MatrixTable.add_row_index`). Each variant is regarded as a vector of
elements defined by `entry_expr`, typically the number of alternate alleles
or genotype dosage. Missing values are mean-imputed within variant.
The method produces a symmetric block-sparse matrix supported in a
neighborhood of the diagonal. If variants :math:`i` and :math:`j` are on the
same contig and within `radius` base pairs (inclusive) then the
:math:`(i, j)` element is their
`Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`__.
Otherwise, the :math:`(i, j)` element is ``0.0``.
Rows with a constant value (i.e., zero variance) will result in ``nan``
correlation values. To avoid this, first check that all variants vary or
filter out constant variants (for example, with the help of
:func:`.aggregators.stats`).
If the :meth:`.global_position` on `locus_expr` is not in ascending order,
this method will fail. Ascending order should hold for a matrix table keyed
by locus or variant (and the associated row table), or for a table that's
been ordered by `locus_expr`.
Set `coord_expr` to use a value other than position to define the windows.
This row-indexed numeric expression must be non-missing, non-``nan``, on the
same source as `locus_expr`, and ascending with respect to locus
position for each contig; otherwise the method will raise an error.
Warning
-------
See the warnings in :meth:`row_correlation`. In particular, for large
matrices it may be preferable to run its stages separately.
`entry_expr` and `locus_expr` are implicitly aligned by row-index, though
they need not be on the same source. If their sources differ in the number
of rows, an error will be raised; otherwise, unintended misalignment may
silently produce unexpected results.
Parameters
----------
entry_expr : :class:`.Float64Expression`
Entry-indexed numeric expression on matrix table.
locus_expr : :class:`.LocusExpression`
Row-indexed locus expression on a table or matrix table that is
row-aligned with the matrix table of `entry_expr`.
radius: :obj:`int` or :obj:`float`
Radius of window for row values.
coord_expr: :class:`.Float64Expression`, optional
Row-indexed numeric expression for the row value on the same table or
matrix table as `locus_expr`.
By default, the row value is given by the locus position.
block_size : :obj:`int`, optional
Block size. Default given by :meth:`.BlockMatrix.default_block_size`.
Returns
-------
:class:`.BlockMatrix`
Windowed correlation matrix between variants.
Row and column indices correspond to matrix table variant index.
"""
starts_and_stops = hl.linalg.utils.locus_windows(locus_expr, radius, coord_expr, _localize=False)
starts_and_stops = hl.tuple([starts_and_stops[0].map(lambda i: hl.int64(i)), starts_and_stops[1].map(lambda i: hl.int64(i))])
ld = hl.row_correlation(entry_expr, block_size)
return ld._sparsify_row_intervals_expr(starts_and_stops, blocks_only=False)
@typecheck(n_populations=int,
n_samples=int,
n_variants=int,
n_partitions=nullable(int),
pop_dist=nullable(sequenceof(numeric)),
fst=nullable(sequenceof(numeric)),
af_dist=nullable(expr_any),
reference_genome=reference_genome_type,
mixture=bool)
def balding_nichols_model(n_populations, n_samples, n_variants, n_partitions=None,
pop_dist=None, fst=None, af_dist=None,
reference_genome='default', mixture=False) -> MatrixTable:
r"""Generate a matrix table of variants, samples, and genotypes using the
Balding-Nichols or Pritchard-Stephens-Donnelly model.
Examples
--------
Generate a matrix table of genotypes with 1000 variants and 100 samples
across 3 populations:
>>> bn_ds = hl.balding_nichols_model(3, 100, 1000, reference_genome='GRCh37')
Generate a matrix table using 4 populations, 40 samples, 150 variants, 3
partitions, population distribution ``[0.1, 0.2, 0.3, 0.4]``,
:math:`F_{ST}` values ``[.02, .06, .04, .12]``, ancestral allele
frequencies drawn from a truncated beta distribution with ``a = 0.01`` and
``b = 2.0`` over the interval ``[0.05, 1]``, and random seed 1:
>>> hl.set_global_seed(1)
>>> bn_ds = hl.balding_nichols_model(4, 40, 150, 3,
... pop_dist=[0.1, 0.2, 0.3, 0.4],
... fst=[.02, .06, .04, .12],
... af_dist=hl.rand_beta(a=0.01, b=2.0, lower=0.05, upper=1.0))
To guarantee reproducibility, we set the Hail global seed with
:func:`.set_global_seed` immediately prior to generating the dataset.
Notes
-----
This method simulates a matrix table of variants, samples, and genotypes
using the Balding-Nichols model, which we now define.
- :math:`K` populations are labeled by integers :math:`0, 1, \dots, K - 1`.
- :math:`N` samples are labeled by strings :math:`0, 1, \dots, N - 1`.
- :math:`M` variants are defined as ``1:1:A:C``, ``1:2:A:C``, ...,
``1:M:A:C``.
- The default distribution for population assignment :math:`\pi` is uniform.
- The default ancestral frequency distribution :math:`P_0` is uniform on
:math:`[0.1, 0.9]`.
- The default :math:`F_{ST}` values are all :math:`0.1`.
The Balding-Nichols model models genotypes of individuals from a structured
population comprising :math:`K` homogeneous modern populations that have
each diverged from a single ancestral population (a `star phylogeny`). Each
sample is assigned a population by sampling from the categorical
distribution :math:`\pi`. Note that the actual size of each population is
random.
Variants are modeled as biallelic and unlinked. Ancestral allele
frequencies are drawn independently for each variant from a frequency
spectrum :math:`P_0`. The extent of genetic drift of each modern population
from the ancestral population is defined by the corresponding :math:`F_{ST}`
parameter :math:`F_k` (here and below, lowercase indices run over a range
bounded by the corresponding uppercase parameter, e.g. :math:`k = 1, \ldots,
K`). For each variant and population, allele frequencies are drawn from a
`beta distribution <https://en.wikipedia.org/wiki/Beta_distribution>`__
whose parameters are determined by the ancestral allele frequency and
:math:`F_{ST}` parameter. The beta distribution gives a continuous
approximation of the effect of genetic drift. We denote sample population
assignments by :math:`k_n`, ancestral allele frequencies by :math:`p_m`,
population allele frequencies by :math:`p_{k, m}`, and diploid, unphased
genotype calls by :math:`g_{n, m}` (0, 1, and 2 correspond to homozygous
reference, heterozygous, and homozygous variant, respectively).
The generative model is then given by:
.. math::
\begin{aligned}
k_n \,&\sim\, \pi \\
p_m \,&\sim\, P_0 \\
p_{k,m} \mid p_m\,&\sim\, \mathrm{Beta}(\mu = p_m,\, \sigma^2 = F_k p_m (1 - p_m)) \\
g_{n,m} \mid k_n, p_{k, m} \,&\sim\, \mathrm{Binomial}(2, p_{k_n, m})
\end{aligned}
The beta distribution is parametrized by its mean and variance above; the usual parameters
are :math:`a = (1 - p) \frac{1 - F}{F}` and :math:`b = p \frac{1 - F}{F}` with
:math:`F = F_k` and :math:`p = p_m`.
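    For intuition, here is a minimal NumPy sketch of this generative model for
    a single variant (an illustration only, not part of the Hail API; the
    variable names are ours):
    .. code-block:: python
        import numpy as np
        rng = np.random.default_rng(0)
        K, N = 3, 5                          # populations, samples
        F = np.full(K, 0.1)                  # F_ST per population
        k_n = rng.integers(0, K, size=N)     # population assignments (uniform pi)
        p_m = rng.uniform(0.1, 0.9)          # ancestral allele frequency
        a = p_m * (1 - F) / F                # beta parameters giving mean p_m and
        b = (1 - p_m) * (1 - F) / F          #   variance F * p_m * (1 - p_m)
        p_km = rng.beta(a, b)                # modern population allele frequencies
        g_nm = rng.binomial(2, p_km[k_n])    # unphased diploid genotypes (0, 1, 2)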
The resulting dataset has the following fields.
Global fields:
- `bn.n_populations` (:py:data:`.tint32`) -- Number of populations.
- `bn.n_samples` (:py:data:`.tint32`) -- Number of samples.
- `bn.n_variants` (:py:data:`.tint32`) -- Number of variants.
- `bn.n_partitions` (:py:data:`.tint32`) -- Number of partitions.
- `bn.pop_dist` (:class:`.tarray` of :py:data:`.tfloat64`) -- Population distribution indexed by
population.
- `bn.fst` (:class:`.tarray` of :py:data:`.tfloat64`) -- :math:`F_{ST}` values indexed by
population.
- `bn.seed` (:py:data:`.tint32`) -- Random seed.
- `bn.mixture` (:py:data:`.tbool`) -- Value of `mixture` parameter.
Row fields:
- `locus` (:class:`.tlocus`) -- Variant locus (key field).
- `alleles` (:class:`.tarray` of :py:data:`.tstr`) -- Variant alleles (key field).
- `ancestral_af` (:py:data:`.tfloat64`) -- Ancestral allele frequency.
- `af` (:class:`.tarray` of :py:data:`.tfloat64`) -- Modern allele frequencies indexed by
population.
Column fields:
    - `sample_idx` (:py:data:`.tint32`) -- Sample index (key field).
- `pop` (:py:data:`.tint32`) -- Population of sample.
Entry fields:
- `GT` (:py:data:`.tcall`) -- Genotype call (diploid, unphased).
For the `Pritchard-Stephens-Donnelly model <http://www.genetics.org/content/155/2/945.long>`__,
set the `mixture` to true to treat `pop_dist` as the parameters of the
Dirichlet distribution describing admixture between the modern populations.
In this case, the type of `pop` is :class:`.tarray` of
:py:data:`.tfloat64` and the value is the mixture proportions.
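    For example, an admixed dataset could be generated with (an illustrative
    call, following the signature above):
    >>> bn_admixed = hl.balding_nichols_model(3, 100, 1000, mixture=True)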
Parameters
----------
n_populations : :obj:`int`
Number of modern populations.
n_samples : :obj:`int`
Total number of samples.
n_variants : :obj:`int`
Number of variants.
n_partitions : :obj:`int`, optional
Number of partitions.
        Default is 1 partition per 128 * 1024 * 1024 (about 134 million)
        entries or 8 partitions, whichever is larger.
pop_dist : :obj:`list` of :obj:`float`, optional
Unnormalized population distribution, a list of length
`n_populations` with non-negative values.
Default is ``[1, ..., 1]``.
fst : :obj:`list` of :obj:`float`, optional
:math:`F_{ST}` values, a list of length `n_populations` with values
in (0, 1). Default is ``[0.1, ..., 0.1]``.
af_dist : :class:`.Float64Expression`, optional
Representing a random function. Ancestral allele frequency
distribution. Default is :func:`.rand_unif` over the range
`[0.1, 0.9]` with seed 0.
reference_genome : :class:`str` or :class:`.ReferenceGenome`
Reference genome to use.
mixture : :obj:`bool`
Treat `pop_dist` as the parameters of a Dirichlet distribution,
        as in the Pritchard-Stephens-Donnelly model.
Returns
-------
:class:`.MatrixTable`
Simulated matrix table of variants, samples, and genotypes.
"""
if pop_dist is None:
pop_dist = [1 for _ in range(n_populations)]
if fst is None:
fst = [0.1 for _ in range(n_populations)]
if af_dist is None:
af_dist = hl.rand_unif(0.1, 0.9, seed=0)
if n_partitions is None:
n_partitions = max(8, int(n_samples * n_variants / (128 * 1024 * 1024)))
# verify args
for name, var in {"populations": n_populations,
"samples": n_samples,
"variants": n_variants,
"partitions": n_partitions}.items():
if var < 1:
raise ValueError("n_{} must be positive, got {}".format(name, var))
for name, var in {"pop_dist": pop_dist, "fst": fst}.items():
if len(var) != n_populations:
raise ValueError("{} must be of length n_populations={}, got length {}"
.format(name, n_populations, len(var)))
if any(x < 0 for x in pop_dist):
raise ValueError("pop_dist must be non-negative, got {}"
.format(pop_dist))
if any(x <= 0 or x >= 1 for x in fst):
raise ValueError("elements of fst must satisfy 0 < x < 1, got {}"
.format(fst))
# verify af_dist
if not af_dist._is_scalar:
raise ExpressionException('balding_nichols_model expects af_dist to '
+ 'have scalar arguments: found expression '
+ 'from source {}'
.format(af_dist._indices.source))
if af_dist.dtype != tfloat64:
raise ValueError("af_dist must be a hail function with return type tfloat64.")
info("balding_nichols_model: generating genotypes for {} populations, {} samples, and {} variants..."
.format(n_populations, n_samples, n_variants))
# generate matrix table
bn = hl.utils.range_matrix_table(n_variants, n_samples, n_partitions)
bn = bn.annotate_globals(
bn=hl.struct(n_populations=n_populations,
n_samples=n_samples,
n_variants=n_variants,
n_partitions=n_partitions,
pop_dist=pop_dist,
fst=fst,
mixture=mixture))
# col info
pop_f = hl.rand_dirichlet if mixture else hl.rand_cat
bn = bn.key_cols_by(sample_idx=bn.col_idx)
bn = bn.select_cols(pop=pop_f(pop_dist))
# row info
bn = bn.key_rows_by(locus=hl.locus_from_global_position(bn.row_idx, reference_genome=reference_genome),
alleles=['A', 'C'])
bn = bn.select_rows(ancestral_af=af_dist,
af=hl.bind(lambda ancestral:
hl.array([(1 - x) / x for x in fst])
.map(lambda x:
hl.rand_beta(ancestral * x,
(1 - ancestral) * x)),
af_dist))
# entry info
p = hl.sum(bn.pop * bn.af) if mixture else bn.af[bn.pop]
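    # sample the genotype with Binomial(2, p) probabilities: (1-p)^2, 2p(1-p), p^2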
idx = hl.rand_cat([(1 - p) ** 2, 2 * p * (1 - p), p ** 2])
return bn.select_entries(GT=hl.unphased_diploid_gt_index_call(idx))
@typecheck(mt=MatrixTable, f=anytype)
def filter_alleles(mt: MatrixTable,
f: Callable) -> MatrixTable:
"""Filter alternate alleles.
.. include:: ../_templates/req_tvariant.rst
Examples
--------
Keep SNPs:
>>> ds_result = hl.filter_alleles(ds, lambda allele, i: hl.is_snp(ds.alleles[0], allele))
Keep alleles with AC > 0:
>>> ds_result = hl.filter_alleles(ds, lambda a, allele_index: ds.info.AC[allele_index - 1] > 0)
Update the AC field of the resulting dataset:
>>> updated_info = ds_result.info.annotate(AC = ds_result.new_to_old.map(lambda i: ds_result.info.AC[i-1]))
>>> ds_result = ds_result.annotate_rows(info = updated_info)
Notes
-----
The following new fields are generated:
- `old_locus` (``locus``) -- The old locus, before filtering and computing
the minimal representation.
- `old_alleles` (``array<str>``) -- The old alleles, before filtering and
computing the minimal representation.
- `old_to_new` (``array<int32>``) -- An array that maps old allele index to
new allele index. Its length is the same as `old_alleles`. Alleles that
are filtered are missing.
- `new_to_old` (``array<int32>``) -- An array that maps new allele index to
the old allele index. Its length is the same as the modified `alleles`
field.
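    As a hypothetical illustration, keeping only SNP alleles at a site with
    alleles ``['A', 'AT', 'C']`` (so the insertion at old index 1 is removed)
    would give:
    .. code-block:: text
        alleles:    ['A', 'C']
        old_to_new: [0, NA, 1]
        new_to_old: [0, 2]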
If all alternate alleles of a variant are filtered out, the variant itself
is filtered out.
**Using** `f`
The `f` argument is a function or lambda evaluated per alternate allele to
determine whether that allele is kept. If `f` evaluates to ``True``, the
allele is kept. If `f` evaluates to ``False`` or missing, the allele is
removed.
`f` is a function that takes two arguments: the allele string (of type
:class:`.StringExpression`) and the allele index (of type
:class:`.Int32Expression`), and returns a boolean expression. This can
be either a defined function or a lambda. For example, these two usages
are equivalent:
(with a lambda)
>>> ds_result = hl.filter_alleles(ds, lambda allele, i: hl.is_snp(ds.alleles[0], allele))
(with a defined function)
>>> def filter_f(allele, allele_index):
... return hl.is_snp(ds.alleles[0], allele)
>>> ds_result = hl.filter_alleles(ds, filter_f)
Warning
-------
:func:`.filter_alleles` does not update any fields other than `locus` and
`alleles`. This means that row fields like allele count (AC) and entry
fields like allele depth (AD) can become meaningless unless they are also
updated. You can update them with :meth:`.annotate_rows` and
:meth:`.annotate_entries`.
See Also
--------
:func:`.filter_alleles_hts`
Parameters
----------
mt : :class:`.MatrixTable`
Dataset.
f : callable
Function from (allele: :class:`.StringExpression`, allele_index:
:class:`.Int32Expression`) to :class:`.BooleanExpression`
Returns
-------
:class:`.MatrixTable`
"""
require_row_key_variant(mt, 'filter_alleles')
inclusion = hl.range(0, hl.len(mt.alleles)).map(lambda i: (i == 0) | hl.bind(lambda ii: f(mt.alleles[ii], ii), i))
# old locus, old alleles, new to old, old to new
mt = mt.annotate_rows(__allele_inclusion=inclusion,
old_locus=mt.locus,
old_alleles=mt.alleles)
new_to_old = (hl.enumerate(mt.__allele_inclusion)
.filter(lambda elt: elt[1])
.map(lambda elt: elt[0]))
old_to_new_dict = (hl.dict(hl.enumerate(hl.enumerate(mt.alleles)
.filter(lambda elt: mt.__allele_inclusion[elt[0]]))
.map(lambda elt: (elt[1][1], elt[0]))))
old_to_new = hl.bind(lambda d: mt.alleles.map(lambda a: d.get(a)), old_to_new_dict)
mt = mt.annotate_rows(old_to_new=old_to_new, new_to_old=new_to_old)
new_locus_alleles = hl.min_rep(mt.locus, mt.new_to_old.map(lambda i: mt.alleles[i]))
mt = mt.annotate_rows(__new_locus=new_locus_alleles.locus, __new_alleles=new_locus_alleles.alleles)
mt = mt.filter_rows(hl.len(mt.__new_alleles) > 1)
left = mt.filter_rows((mt.locus == mt.__new_locus) & (mt.alleles == mt.__new_alleles))
right = mt.filter_rows((mt.locus != mt.__new_locus) | (mt.alleles != mt.__new_alleles))
right = right.key_rows_by(locus=right.__new_locus, alleles=right.__new_alleles)
return left.union_rows(right, _check_cols=False).drop('__allele_inclusion', '__new_locus', '__new_alleles')
@typecheck(mt=MatrixTable, f=anytype, subset=bool)
def filter_alleles_hts(mt: MatrixTable,
f: Callable,
subset: bool = False) -> MatrixTable:
"""Filter alternate alleles and update standard GATK entry fields.
Examples
--------
Filter to SNP alleles using the subset strategy:
>>> ds_result = hl.filter_alleles_hts(
... ds,
... lambda allele, _: hl.is_snp(ds.alleles[0], allele),
... subset=True)
Update the AC field of the resulting dataset:
>>> updated_info = ds_result.info.annotate(AC = ds_result.new_to_old.map(lambda i: ds_result.info.AC[i-1]))
>>> ds_result = ds_result.annotate_rows(info = updated_info)
Notes
-----
For usage of the `f` argument, see the :func:`.filter_alleles`
documentation.
:func:`.filter_alleles_hts` requires the dataset have the GATK VCF schema,
namely the following entry fields in this order:
.. code-block:: text
GT: call
AD: array<int32>
DP: int32
GQ: int32
PL: array<int32>
Use :meth:`.MatrixTable.select_entries` to rearrange these fields if
necessary.
The following new fields are generated:
- `old_locus` (``locus``) -- The old locus, before filtering and computing
the minimal representation.
- `old_alleles` (``array<str>``) -- The old alleles, before filtering and
computing the minimal representation.
- `old_to_new` (``array<int32>``) -- An array that maps old allele index to
new allele index. Its length is the same as `old_alleles`. Alleles that
are filtered are missing.
- `new_to_old` (``array<int32>``) -- An array that maps new allele index to
the old allele index. Its length is the same as the modified `alleles`
field.
**Downcode algorithm**
We will illustrate the behavior on the example genotype below
when filtering the first alternate allele (allele 1) at a site
with 1 reference allele and 2 alternate alleles.
.. code-block:: text
GT: 1/2
GQ: 10
AD: 0,50,35
0 | 1000
1 | 1000 10
2 | 1000 0 20
+-----------------
0 1 2
    The downcode algorithm recodes occurrences of filtered alleles
    to occurrences of the reference allele (e.g. 1 -> 0 in our
example). So the depths of filtered alleles in the AD field
are added to the depth of the reference allele. Where
downcoding filtered alleles merges distinct genotypes, the
minimum PL is used (since PL is on a log scale, this roughly
corresponds to adding probabilities). The PLs are then
re-normalized (shifted) so that the most likely genotype has a
PL of 0, and GT is set to this genotype. If an allele is
filtered, this algorithm acts similarly to
:func:`.split_multi_hts`.
The downcode algorithm would produce the following:
.. code-block:: text
GT: 0/1
GQ: 10
AD: 35,50
0 | 20
1 | 0 10
+-----------
0 1
In summary:
- GT: Downcode filtered alleles to reference.
- AD: Columns of filtered alleles are eliminated and their
values are added to the reference column, e.g., filtering
alleles 1 and 2 transforms ``25,5,10,20`` to ``40,20``.
- DP: No change.
- PL: Downcode filtered alleles to reference, combine PLs
using minimum for each overloaded genotype, and shift so
the overall minimum PL is 0.
- GQ: The second-lowest PL (after shifting).
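    As a rough sketch (hypothetical numbers, plain Python rather than Hail
    expressions), the AD downcoding above amounts to:
    .. code-block:: python
        old_ad = [25, 5, 10, 20]          # AD over alleles 0, 1, 2, 3
        old_to_new_no_na = [0, 0, 0, 1]   # alleles 1 and 2 filtered -> recoded to 0
        new_ad = [sum(ad for ad, j in zip(old_ad, old_to_new_no_na) if j == i)
                  for i in range(max(old_to_new_no_na) + 1)]
        assert new_ad == [40, 20]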
**Subset algorithm**
We will illustrate the behavior on the example genotype below
when filtering the first alternate allele (allele 1) at a site
with 1 reference allele and 2 alternate alleles.
.. code-block:: text
GT: 1/2
GQ: 10
AD: 0,50,35
0 | 1000
1 | 1000 10
2 | 1000 0 20
+-----------------
0 1 2
The subset algorithm subsets the AD and PL arrays
(i.e. removes entries corresponding to filtered alleles) and
then sets GT to the genotype with the minimum PL. Note that
if the genotype changes (as in the example), the PLs are
re-normalized (shifted) so that the most likely genotype has a
PL of 0. Qualitatively, subsetting corresponds to the belief
that the filtered alleles are not real so we should discard
any probability mass associated with them.
The subset algorithm would produce the following:
.. code-block:: text
GT: 1/1
GQ: 980
AD: 0,50
0 | 980
1 | 980 0
+-----------
0 1
In summary:
- GT: Set to most likely genotype based on the PLs ignoring
the filtered allele(s).
- AD: The filtered alleles' columns are eliminated, e.g.,
filtering alleles 1 and 2 transforms ``25,5,10,20`` to
``25,20``.
- DP: Unchanged.
- PL: Columns involving filtered alleles are eliminated and
the remaining columns' values are shifted so the minimum
value is 0.
- GQ: The second-lowest PL (after shifting).
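    A corresponding sketch of the AD subsetting (again with hypothetical
    numbers, in plain Python):
    .. code-block:: python
        old_ad = [25, 5, 10, 20]   # AD over alleles 0, 1, 2, 3
        kept = [0, 3]              # alleles 1 and 2 filtered out
        new_ad = [old_ad[i] for i in kept]
        assert new_ad == [25, 20]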
Warning
-------
:func:`.filter_alleles_hts` does not update any row fields other than
`locus` and `alleles`. This means that row fields like allele count (AC) can
become meaningless unless they are also updated. You can update them with
:meth:`.annotate_rows`.
See Also
--------
:func:`.filter_alleles`
Parameters
----------
mt : :class:`.MatrixTable`
f : callable
Function from (allele: :class:`.StringExpression`, allele_index:
:class:`.Int32Expression`) to :class:`.BooleanExpression`
subset : :obj:`.bool`
Subset PL field if ``True``, otherwise downcode PL field. The
calculation of GT and GQ also depend on whether one subsets or
downcodes the PL.
Returns
-------
:class:`.MatrixTable`
"""
if mt.entry.dtype != hl.hts_entry_schema:
raise FatalError("'filter_alleles_hts': entry schema must be the HTS entry schema:\n"
" found: {}\n"
" expected: {}\n"
" Use 'hl.filter_alleles' to split entries with non-HTS entry fields.".format(
mt.entry.dtype, hl.hts_entry_schema))
mt = filter_alleles(mt, f)
if subset:
newPL = hl.if_else(
hl.is_defined(mt.PL),
hl.bind(
lambda unnorm: unnorm - hl.min(unnorm),
hl.range(0, hl.triangle(mt.alleles.length())).map(
lambda newi: hl.bind(
lambda newc: mt.PL[hl.call(mt.new_to_old[newc[0]],
mt.new_to_old[newc[1]]).unphased_diploid_gt_index()],
hl.unphased_diploid_gt_index_call(newi)))),
hl.missing(tarray(tint32)))
return mt.annotate_entries(
GT=hl.unphased_diploid_gt_index_call(hl.argmin(newPL, unique=True)),
AD=hl.if_else(
hl.is_defined(mt.AD),
hl.range(0, mt.alleles.length()).map(
lambda newi: mt.AD[mt.new_to_old[newi]]),
hl.missing(tarray(tint32))),
# DP unchanged
GQ=hl.gq_from_pl(newPL),
PL=newPL)
# otherwise downcode
else:
mt = mt.annotate_rows(__old_to_new_no_na=mt.old_to_new.map(lambda x: hl.or_else(x, 0)))
newPL = hl.if_else(
hl.is_defined(mt.PL),
(hl.range(0, hl.triangle(hl.len(mt.alleles)))
.map(lambda newi: hl.min(hl.range(0, hl.triangle(hl.len(mt.old_alleles)))
.filter(lambda oldi: hl.bind(
lambda oldc: hl.call(mt.__old_to_new_no_na[oldc[0]],
mt.__old_to_new_no_na[oldc[1]]) == hl.unphased_diploid_gt_index_call(newi),
hl.unphased_diploid_gt_index_call(oldi)))
.map(lambda oldi: mt.PL[oldi])))),
hl.missing(tarray(tint32)))
return mt.annotate_entries(
GT=hl.call(mt.__old_to_new_no_na[mt.GT[0]],
mt.__old_to_new_no_na[mt.GT[1]]),
AD=hl.if_else(
hl.is_defined(mt.AD),
(hl.range(0, hl.len(mt.alleles))
.map(lambda newi: hl.sum(hl.range(0, hl.len(mt.old_alleles))
.filter(lambda oldi: mt.__old_to_new_no_na[oldi] == newi)
.map(lambda oldi: mt.AD[oldi])))),
hl.missing(tarray(tint32))),
# DP unchanged
GQ=hl.gq_from_pl(newPL),
PL=newPL).drop('__old_to_new_no_na')
@typecheck(mt=MatrixTable,
call_field=str,
r2=numeric,
bp_window_size=int,
memory_per_core=int)
def _local_ld_prune(mt, call_field, r2=0.2, bp_window_size=1000000, memory_per_core=256):
bytes_per_core = memory_per_core * 1024 * 1024
fraction_memory_to_use = 0.25
variant_byte_overhead = 50
genotypes_per_pack = 32
n_samples = mt.count_cols()
min_bytes_per_core = math.ceil((1 / fraction_memory_to_use) * 8 * n_samples + variant_byte_overhead)
if bytes_per_core < min_bytes_per_core:
raise ValueError("memory_per_core must be greater than {} MB".format(min_bytes_per_core // (1024 * 1024)))
bytes_per_variant = math.ceil(8 * n_samples / genotypes_per_pack) + variant_byte_overhead
bytes_available_per_core = bytes_per_core * fraction_memory_to_use
max_queue_size = int(max(1.0, math.ceil(bytes_available_per_core / bytes_per_variant)))
info(f'ld_prune: running local pruning stage with max queue size of {max_queue_size} variants')
return Table(ir.MatrixToTableApply(mt._mir, {
'name': 'LocalLDPrune',
'callField': call_field,
'r2Threshold': float(r2),
'windowSize': bp_window_size,
'maxQueueSize': max_queue_size
}))
@typecheck(call_expr=expr_call,
r2=numeric,
bp_window_size=int,
memory_per_core=int,
keep_higher_maf=bool,
block_size=nullable(int))
def ld_prune(call_expr, r2=0.2, bp_window_size=1000000, memory_per_core=256, keep_higher_maf=True, block_size=None):
"""Returns a maximal subset of variants that are nearly uncorrelated within each window.
.. include:: ../_templates/req_diploid_gt.rst
.. include:: ../_templates/req_biallelic.rst
.. include:: ../_templates/req_tvariant.rst
Examples
--------
Prune variants in linkage disequilibrium by filtering a dataset to those variants returned
by :func:`.ld_prune`. If the dataset contains multiallelic variants, the multiallelic variants
must be filtered out or split before being passed to :func:`.ld_prune`.
>>> biallelic_dataset = dataset.filter_rows(hl.len(dataset.alleles) == 2)
>>> pruned_variant_table = hl.ld_prune(biallelic_dataset.GT, r2=0.2, bp_window_size=500000)
>>> filtered_ds = dataset.filter_rows(hl.is_defined(pruned_variant_table[dataset.row_key]))
Notes
-----
This method finds a maximal subset of variants such that the squared Pearson
correlation coefficient :math:`r^2` of any pair at most `bp_window_size`
base pairs apart is strictly less than `r2`. Each variant is represented as
a vector over samples with elements given by the (mean-imputed) number of
alternate alleles. In particular, even if present, **phase information is
ignored**. Variants that do not vary across samples are dropped.
The method prunes variants in linkage disequilibrium in three stages.
- The first, "local pruning" stage prunes correlated variants within each
partition, using a local variant queue whose size is determined by
`memory_per_core`. A larger queue may facilitate more local pruning in
this stage. Minor allele frequency is not taken into account. The
parallelism is the number of matrix table partitions.
- The second, "global correlation" stage uses block-sparse matrix
multiplication to compute correlation between each pair of remaining
variants within `bp_window_size` base pairs, and then forms a graph of
correlated variants. The parallelism of writing the locally-pruned matrix
table as a block matrix is ``n_locally_pruned_variants / block_size``.
- The third, "global pruning" stage applies :func:`.maximal_independent_set`
to prune variants from this graph until no edges remain. This algorithm
iteratively removes the variant with the highest vertex degree. If
`keep_higher_maf` is true, then in the case of a tie for highest degree,
the variant with lowest minor allele frequency is removed.
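    For intuition, the squared correlation used in the second stage corresponds
    to the following NumPy sketch (illustrative only; the actual computation
    uses block matrices):
    .. code-block:: python
        import numpy as np
        # two variants as mean-imputed alternate-allele counts over samples
        x = np.array([0., 1., 2., 1., 0.])
        y = np.array([0., 1., 2., 2., 1.])
        def standardize(v):
            v = v - v.mean()
            return v / np.sqrt((v ** 2).sum())
        r2 = float(standardize(x) @ standardize(y)) ** 2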
Warning
-------
The locally-pruned matrix table and block matrix are stored as temporary files
on persistent disk. See the warnings on `BlockMatrix.from_entry_expr` with
regard to memory and Hadoop replication errors.
Parameters
----------
call_expr : :class:`.CallExpression`
Entry-indexed call expression on a matrix table with row-indexed
variants and column-indexed samples.
r2 : :obj:`float`
Squared correlation threshold (exclusive upper bound).
Must be in the range [0.0, 1.0].
bp_window_size: :obj:`int`
Window size in base pairs (inclusive upper bound).
memory_per_core : :obj:`int`
Memory in MB per core for local pruning queue.
    keep_higher_maf: :obj:`bool`
If ``True``, break ties at each step of the global pruning stage by
preferring to keep variants with higher minor allele frequency.
block_size: :obj:`int`, optional
Block size for block matrices in the second stage.
Default given by :meth:`.BlockMatrix.default_block_size`.
Returns
-------
:class:`.Table`
Table of a maximal independent set of variants.
"""
if block_size is None:
block_size = BlockMatrix.default_block_size()
if not 0.0 <= r2 <= 1:
raise ValueError(f'r2 must be in the range [0.0, 1.0], found {r2}')
if bp_window_size < 0:
raise ValueError(f'bp_window_size must be non-negative, found {bp_window_size}')
check_entry_indexed('ld_prune/call_expr', call_expr)
mt = matrix_table_source('ld_prune/call_expr', call_expr)
require_row_key_variant(mt, 'ld_prune')
# FIXME: remove once select_entries on a field is free
if call_expr in mt._fields_inverse:
field = mt._fields_inverse[call_expr]
else:
field = Env.get_uid()
mt = mt.select_entries(**{field: call_expr})
mt = mt.select_rows().select_cols()
mt = mt.distinct_by_row()
locally_pruned_table_path = new_temp_file()
(_local_ld_prune(require_biallelic(mt, 'ld_prune'), field, r2, bp_window_size, memory_per_core)
.write(locally_pruned_table_path, overwrite=True))
locally_pruned_table = hl.read_table(locally_pruned_table_path).add_index()
mt = mt.annotate_rows(info=locally_pruned_table[mt.row_key])
mt = mt.filter_rows(hl.is_defined(mt.info)).unfilter_entries()
std_gt_bm = BlockMatrix.from_entry_expr(
hl.or_else(
(mt[field].n_alt_alleles() - mt.info.mean) * mt.info.centered_length_rec,
0.0),
block_size=block_size)
r2_bm = (std_gt_bm @ std_gt_bm.T) ** 2
_, stops = hl.linalg.utils.locus_windows(locally_pruned_table.locus, bp_window_size)
entries = r2_bm.sparsify_row_intervals(range(stops.size), stops, blocks_only=True).entries(keyed=False)
entries = entries.filter((entries.entry >= r2) & (entries.i < entries.j))
entries = entries.select(i=hl.int32(entries.i), j=hl.int32(entries.j))
if keep_higher_maf:
fields = ['mean', 'locus']
else:
fields = ['locus']
info = locally_pruned_table.aggregate(
hl.agg.collect(locally_pruned_table.row.select('idx', *fields)), _localize=False)
info = hl.sorted(info, key=lambda x: x.idx)
entries = entries.annotate_globals(info=info)
entries = entries.filter(
(entries.info[entries.i].locus.contig == entries.info[entries.j].locus.contig)
& (entries.info[entries.j].locus.position - entries.info[entries.i].locus.position <= bp_window_size))
if keep_higher_maf:
entries = entries.annotate(
i=hl.struct(idx=entries.i,
twice_maf=hl.min(entries.info[entries.i].mean, 2.0 - entries.info[entries.i].mean)),
j=hl.struct(idx=entries.j,
twice_maf=hl.min(entries.info[entries.j].mean, 2.0 - entries.info[entries.j].mean)))
def tie_breaker(left, right):
return hl.sign(right.twice_maf - left.twice_maf)
else:
tie_breaker = None
variants_to_remove = hl.maximal_independent_set(
entries.i, entries.j, keep=False, tie_breaker=tie_breaker, keyed=False)
locally_pruned_table = locally_pruned_table.annotate_globals(
variants_to_remove=variants_to_remove.aggregate(
hl.agg.collect_as_set(variants_to_remove.node.idx), _localize=False))
return locally_pruned_table.filter(
locally_pruned_table.variants_to_remove.contains(hl.int32(locally_pruned_table.idx)),
keep=False
).select().persist()
def _warn_if_no_intercept(caller, covariates):
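    # a constant covariate (e.g. the literal 1.0) has no indices, so if every
    # covariate is indexed by some axis the model has no intercept term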
if all([e._indices.axes for e in covariates]):
warning(f'{caller}: model appears to have no intercept covariate.'
'\n To include an intercept, add 1.0 to the list of covariates.')
return True
return False
| mit |
RodericDay/MiniPNM | minipnm/graphics.py | 1 | 8311 | from __future__ import print_function
from tempfile import NamedTemporaryFile
from subprocess import call
import itertools as it
import numpy as np
import matplotlib as mpl
from matplotlib import cm
try:
import vtk
except ImportError:
vtk = type("vtk module missing. functionality unavailable!",
(), {'vtkActor': object})
class Actor(vtk.vtkActor):
callable = print
def __init__(self):
raise NotImplementedError()
def set_points(self, points):
pointArray = vtk.vtkPoints()
for x,y,z in points:
pointArray.InsertNextPoint(x, y, z)
self.polydata.SetPoints(pointArray)
def set_lines(self, lines):
cellArray = vtk.vtkCellArray()
for ids in lines:
idList = vtk.vtkIdList()
for i in ids:
idList.InsertNextId(i)
cellArray.InsertNextCell(idList)
self.polydata.SetLines(cellArray)
def set_scalars(self, values):
floats = vtk.vtkFloatArray()
for v in values:
floats.InsertNextValue(v)
self.polydata.GetPointData().SetScalars(floats)
def update(self, t=0):
i = t % len(self.script)
self.set_scalars(self.script[i])
class Wires(Actor):
'''
Points and lines. The script determines the color of the lines.
'''
def __init__(self, points, pairs, values=None, cmap=None, alpha=1, vmin=None, vmax=None):
self.polydata = vtk.vtkPolyData()
self.set_points(points)
self.set_lines(pairs)
self.mapper = vtk.vtkPolyDataMapper()
self.mapper.SetInput(self.polydata)
self.SetMapper(self.mapper)
self.GetProperty().SetOpacity(alpha)
if values is None:
values = np.ones(len(points))*0.5
vmin, vmax = 0, 1
self.script = np.atleast_2d(values)
cmap = cm.get_cmap(cmap if cmap is not None else 'coolwarm')
vmin = vmin if vmin is not None else self.script.min()
vmax = vmax if vmax is not None else self.script.max()
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
self.cmapper = cm.ScalarMappable(norm=norm, cmap=cmap)
self.update()
def set_scalars(self, values):
colors = vtk.vtkUnsignedCharArray()
colors.SetNumberOfComponents(3)
for r,g,b,a in 255*self.cmapper.to_rgba(values):
colors.InsertNextTuple3(r,g,b)
self.polydata.GetPointData().SetScalars(colors)
class Spheres(Actor):
def __init__(self, centers, radii=1, alpha=1, color=(1,1,1)):
self.polydata = vtk.vtkPolyData()
self.set_points(centers)
self.source = vtk.vtkSphereSource()
self.glyph3D = vtk.vtkGlyph3D()
self.glyph3D.SetSourceConnection(self.source.GetOutputPort())
self.glyph3D.SetInput(self.polydata)
self.glyph3D.GeneratePointIdsOn()
self.glyph3D.Update()
self.mapper = vtk.vtkPolyDataMapper()
self.mapper.SetInputConnection(self.glyph3D.GetOutputPort())
self.SetMapper(self.mapper)
self.script = 2*np.atleast_2d(np.ones(len(centers)))*radii
self.GetProperty().SetOpacity(alpha)
r,g,b = color
self.mapper.ScalarVisibilityOff()
self.GetProperty().SetColor(r,g,b)
self.update()
class Tubes(Actor):
def __init__(self, centers, vectors, radii, alpha=1, cmap=None):
tails = centers - np.divide(vectors, 2.)
heads = centers + np.divide(vectors, 2.)
points = np.vstack(zip(tails, heads))
pairs = np.arange(len(centers)*2).reshape(-1, 2)
radii = np.repeat(radii, 2)
assert (points.size/3. == pairs.size)
assert (pairs.size == radii.size)
self.polydata = vtk.vtkPolyData()
self.set_points(points)
self.set_lines(pairs)
self.set_scalars(radii)
self.tubeFilter = vtk.vtkTubeFilter()
self.tubeFilter.SetInput(self.polydata)
self.tubeFilter.SetVaryRadiusToVaryRadiusByAbsoluteScalar()
self.tubeFilter.SetNumberOfSides(10)
# self.tubeFilter.CappingOn()
self.mapper = vtk.vtkPolyDataMapper()
self.mapper.SetInputConnection(self.tubeFilter.GetOutputPort())
self.mapper.ScalarVisibilityOff()
self.SetMapper(self.mapper)
self.GetProperty().SetOpacity(alpha)
self.script = [0]
def update(self, t=0):
pass
class Scene(object):
ticks = it.count(0)
def __init__(self, parent=None, fix_camera=True,
background=None, size=None):
'''
fix_camera : more sensible default
'''
if parent is not None:
self.renWin = parent.GetRenderWindow()
self.iren = self.renWin.GetInteractor()
else:
self.renWin = vtk.vtkRenderWindow()
if size is None:
self.renWin.SetSize(800, 600)
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.SetRenderWindow(self.renWin)
self.ren = vtk.vtkRenderer()
if background == 'white':
self.ren.SetBackground(1, 1, 1)
self.renWin.AddRenderer(self.ren)
if fix_camera:
camera = vtk.vtkInteractorStyleTrackballCamera()
camera.SetCurrentRenderer(self.ren)
self.iren.SetInteractorStyle(camera)
self.picker = vtk.vtkCellPicker()
self.iren.SetPicker(self.picker)
self.picker.AddObserver("EndPickEvent", self.handle_pick)
def __iter__(self):
for aid in range(self.ren.VisibleActorCount()):
actor = self.ren.GetActors().GetItemAsObject(aid)
if hasattr(actor, 'script'):
yield actor
@property
def count(self):
return len([actor for actor in self])
def __len__(self):
if self.count==0:
return 0
return max(len(actor.script) for actor in self)
def handle_pick(self, obj, event):
actor = self.picker.GetActor()
glyph3D = actor.glyph3D
pointIds = glyph3D.GetOutput().GetPointData().GetArray("InputPointIds")
selectedId = int(pointIds.GetTuple1(self.picker.GetPointId()))
actor.callable(selectedId)
actor.polydata.Modified()
self.renWin.Render()
def update_all(self, obj=None, event=None, t=None):
if t is None: t = next(self.ticks)
for actor in self:
actor.update(t)
self.renWin.Render()
def save(self, frames, outfile='animated.mp4'):
'''
takes a snapshot of the frames at given t, and returns the paths
'''
windowToImage = vtk.vtkWindowToImageFilter()
windowToImage.SetInput(self.renWin)
writer = vtk.vtkPNGWriter()
writer.SetInput(windowToImage.GetOutput())
slide_paths = []
for t in frames:
# f = NamedTemporaryFile(suffix='.png', delete=False)
f = open("img{:0>3}.png".format(t), 'w')
self.update_all(t=t)
windowToImage.Modified()
writer.SetFileName(f.name)
writer.Write()
slide_paths.append( f.name )
if len(slide_paths)==1:
if not outfile.endswith('.png'):
raise Exception("Cannot save single snapshot videos")
call(["mv","img000.png",outfile])
elif outfile.endswith('.mp4'):
call(["rm","-f", outfile])
call(["/usr/local/bin/ffmpeg",
"-i","img%03d.png",
"-c:v","libx264","-r","30",
"-pix_fmt","yuv420p", outfile])
call(["rm"]+slide_paths)
def play(self, timeout=1):
self.iren.Initialize()
if timeout is not None:
self.iren.AddObserver('TimerEvent', self.update_all)
self.timer = self.iren.CreateRepeatingTimer(timeout)
self.update_all()
self.iren.Start()
def add_actors(self, list_of_actors, label=False):
for actor in list_of_actors:
self.ren.AddActor(actor)
if label:
labelMapper = vtk.vtkLabeledDataMapper()
labelMapper.SetInput(actor.polydata)
labelActor = vtk.vtkActor2D()
labelActor.SetMapper(labelMapper)
self.ren.AddActor(labelActor)
| mit |
idlead/scikit-learn | examples/classification/plot_classification_probability.py | 138 | 2871 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting, and Gaussian process classification.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
kernel = 1.0 * RBF([1.0, 1.0]) # for GPC
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'),
'GPC': GaussianProcessClassifier(kernel)
}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
daodaoliang/neural-network-animation | matplotlib/tests/test_rcparams.py | 9 | 10258 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import sys
import warnings
import matplotlib as mpl
from matplotlib.tests import assert_str_equal
from matplotlib.testing.decorators import cleanup, knownfailureif
from nose.tools import assert_true, assert_raises, assert_equal
from nose.plugins.skip import SkipTest
import nose
from itertools import chain
import numpy as np
from matplotlib.rcsetup import (validate_bool_maybe_none,
validate_stringlist,
validate_bool,
validate_nseq_int,
validate_nseq_float)
mpl.rc('text', usetex=False)
mpl.rc('lines', linewidth=22)
fname = os.path.join(os.path.dirname(__file__), 'test_rcparams.rc')
def test_rcparams():
usetex = mpl.rcParams['text.usetex']
linewidth = mpl.rcParams['lines.linewidth']
# test context given dictionary
with mpl.rc_context(rc={'text.usetex': not usetex}):
assert mpl.rcParams['text.usetex'] == (not usetex)
assert mpl.rcParams['text.usetex'] == usetex
    # test context given filename (mpl.rc sets linewidth to 33)
with mpl.rc_context(fname=fname):
assert mpl.rcParams['lines.linewidth'] == 33
assert mpl.rcParams['lines.linewidth'] == linewidth
# test context given filename and dictionary
with mpl.rc_context(fname=fname, rc={'lines.linewidth': 44}):
assert mpl.rcParams['lines.linewidth'] == 44
assert mpl.rcParams['lines.linewidth'] == linewidth
# test rc_file
try:
mpl.rc_file(fname)
assert mpl.rcParams['lines.linewidth'] == 33
finally:
mpl.rcParams['lines.linewidth'] = linewidth
def test_RcParams_class():
rc = mpl.RcParams({'font.cursive': ['Apple Chancery',
'Textile',
'Zapf Chancery',
'cursive'],
'font.family': 'sans-serif',
'font.weight': 'normal',
'font.size': 12})
if six.PY3:
expected_repr = """
RcParams({'font.cursive': ['Apple Chancery',
'Textile',
'Zapf Chancery',
'cursive'],
'font.family': ['sans-serif'],
'font.size': 12.0,
'font.weight': 'normal'})""".lstrip()
else:
expected_repr = """
RcParams({u'font.cursive': [u'Apple Chancery',
u'Textile',
u'Zapf Chancery',
u'cursive'],
u'font.family': [u'sans-serif'],
u'font.size': 12.0,
u'font.weight': u'normal'})""".lstrip()
assert_str_equal(expected_repr, repr(rc))
if six.PY3:
expected_str = """
font.cursive: ['Apple Chancery', 'Textile', 'Zapf Chancery', 'cursive']
font.family: ['sans-serif']
font.size: 12.0
font.weight: normal""".lstrip()
else:
expected_str = """
font.cursive: [u'Apple Chancery', u'Textile', u'Zapf Chancery', u'cursive']
font.family: [u'sans-serif']
font.size: 12.0
font.weight: normal""".lstrip()
assert_str_equal(expected_str, str(rc))
# test the find_all functionality
assert ['font.cursive', 'font.size'] == sorted(rc.find_all('i[vz]').keys())
assert ['font.family'] == list(six.iterkeys(rc.find_all('family')))
# remove known failure + warnings after merging to master
@knownfailureif(not (sys.version_info[:2] < (2, 7)))
def test_rcparams_update():
if sys.version_info[:2] < (2, 7):
raise nose.SkipTest("assert_raises as context manager "
"not supported with Python < 2.7")
rc = mpl.RcParams({'figure.figsize': (3.5, 42)})
bad_dict = {'figure.figsize': (3.5, 42, 1)}
# make sure validation happens on input
with assert_raises(ValueError):
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message='.*(validate)',
category=UserWarning)
rc.update(bad_dict)
# remove known failure + warnings after merging to master
@knownfailureif(not (sys.version_info[:2] < (2, 7)))
def test_rcparams_init():
if sys.version_info[:2] < (2, 7):
raise nose.SkipTest("assert_raises as context manager "
"not supported with Python < 2.7")
with assert_raises(ValueError):
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message='.*(validate)',
category=UserWarning)
mpl.RcParams({'figure.figsize': (3.5, 42, 1)})
@cleanup
def test_Bug_2543():
# Test that it possible to add all values to itself / deepcopy
# This was not possible because validate_bool_maybe_none did not
# accept None as an argument.
# https://github.com/matplotlib/matplotlib/issues/2543
# We filter warnings at this stage since a number of them are raised
    # for deprecated rcparams, as they should be. We don't want these
    # printed in the test suite.
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message='.*(deprecated|obsolete)',
category=UserWarning)
with mpl.rc_context():
_copy = mpl.rcParams.copy()
for key in six.iterkeys(_copy):
mpl.rcParams[key] = _copy[key]
mpl.rcParams['text.dvipnghack'] = None
with mpl.rc_context():
from copy import deepcopy
_deep_copy = deepcopy(mpl.rcParams)
# real test is that this does not raise
assert_true(validate_bool_maybe_none(None) is None)
assert_true(validate_bool_maybe_none("none") is None)
_fonttype = mpl.rcParams['svg.fonttype']
assert_true(_fonttype == mpl.rcParams['svg.embed_char_paths'])
with mpl.rc_context():
mpl.rcParams['svg.embed_char_paths'] = False
assert_true(mpl.rcParams['svg.fonttype'] == "none")
@cleanup
def test_Bug_2543_newer_python():
# only split from above because of the usage of assert_raises
# as a context manager, which only works in 2.7 and above
if sys.version_info[:2] < (2, 7):
raise nose.SkipTest("assert_raises as context manager not supported with Python < 2.7")
from matplotlib.rcsetup import validate_bool_maybe_none, validate_bool
with assert_raises(ValueError):
validate_bool_maybe_none("blah")
with assert_raises(ValueError):
validate_bool(None)
with assert_raises(ValueError):
with mpl.rc_context():
mpl.rcParams['svg.fonttype'] = True
if __name__ == '__main__':
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
def _validation_test_helper(validator, arg, target):
res = validator(arg)
assert_equal(res, target)
def _validation_fail_helper(validator, arg, exception_type):
if sys.version_info[:2] < (2, 7):
raise nose.SkipTest("assert_raises as context manager not "
"supported with Python < 2.7")
with assert_raises(exception_type):
validator(arg)
def test_validators():
validation_tests = (
{'validator': validate_bool,
'success': chain(((_, True) for _ in
('t', 'y', 'yes', 'on', 'true', '1', 1, True)),
((_, False) for _ in
('f', 'n', 'no', 'off', 'false', '0', 0, False))),
'fail': ((_, ValueError)
for _ in ('aardvark', 2, -1, [], ))},
{'validator': validate_stringlist,
'success': (('', []),
('a,b', ['a', 'b']),
('aardvark', ['aardvark']),
('aardvark, ', ['aardvark']),
('aardvark, ,', ['aardvark']),
(['a', 'b'], ['a', 'b']),
(('a', 'b'), ['a', 'b']),
((1, 2), ['1', '2'])),
'fail': ((dict(), AssertionError),
(1, AssertionError),)
},
{'validator': validate_nseq_int(2),
'success': ((_, [1, 2])
for _ in ('1, 2', [1.5, 2.5], [1, 2],
(1, 2), np.array((1, 2)))),
'fail': ((_, ValueError)
for _ in ('aardvark', ('a', 1),
(1, 2, 3)
))
},
{'validator': validate_nseq_float(2),
'success': ((_, [1.5, 2.5])
for _ in ('1.5, 2.5', [1.5, 2.5], [1.5, 2.5],
(1.5, 2.5), np.array((1.5, 2.5)))),
'fail': ((_, ValueError)
for _ in ('aardvark', ('a', 1),
(1, 2, 3)
))
}
)
for validator_dict in validation_tests:
validator = validator_dict['validator']
for arg, target in validator_dict['success']:
yield _validation_test_helper, validator, arg, target
for arg, error_type in validator_dict['fail']:
yield _validation_fail_helper, validator, arg, error_type
def test_keymaps():
key_list = [k for k in mpl.rcParams if 'keymap' in k]
for k in key_list:
assert(isinstance(mpl.rcParams[k], list))
def test_rcparams_reset_after_fail():
# There was previously a bug that meant that if rc_context failed and
# raised an exception due to issues in the supplied rc parameters, the
# global rc parameters were left in a modified state.
if sys.version_info[:2] >= (2, 7):
from collections import OrderedDict
else:
raise SkipTest("Test can only be run in Python >= 2.7 as it requires OrderedDict")
with mpl.rc_context(rc={'text.usetex': False}):
assert mpl.rcParams['text.usetex'] is False
with assert_raises(KeyError):
with mpl.rc_context(rc=OrderedDict([('text.usetex', True),('test.blah', True)])):
pass
assert mpl.rcParams['text.usetex'] is False
| mit |
lazywei/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 114 | 11393 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.ones((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 2))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
| bsd-3-clause |
hugobowne/scikit-learn | examples/calibration/plot_calibration.py | 33 | 4794 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
walterreade/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
ipashchenko/ml4vs | ml4vs/nnet.py | 1 | 12347 | # -*- coding: utf-8 -*-
import glob
import os
import numpy as np
import pandas as pd
from sklearn_evaluation.plot import confusion_matrix as plot_cm
# from data_load import load_data
import numpy
from keras import callbacks
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.constraints import maxnorm
from keras.optimizers import SGD
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import StratifiedShuffleSplit, StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from sklearn.preprocessing import Imputer
from sklearn.metrics import confusion_matrix
from utils import print_cm_summary
import matplotlib.pyplot as plt
def shift_log_transform(df, name, shift):
df[name] = np.log(df[name] + shift)
def load_data(fnames, names, names_to_delete):
"""
    Function that loads data from a series of files, where the first file contains
    the zero class and the remaining files contain the one class.
:param fnames:
Iterable of file names.
:param names:
Names of columns in files.
:param names_to_delete:
Column names to delete.
:return:
        X, y - ``sklearn`` arrays of features & responses.
"""
# Load data
dfs = list()
for fn in fnames:
dfs.append(pd.read_table(fn, names=names, engine='python',
na_values='+inf', sep=r"\s*",
usecols=range(30)))
# Remove meaningless features
delta = list()
for df in dfs:
delta.append(df['CSSD'].min())
delta = np.min([d for d in delta if not np.isinf(d)])
print "delta = {}".format(delta)
for df in dfs:
for name in names_to_delete:
del df[name]
try:
shift_log_transform(df, 'CSSD', -delta + 0.1)
except KeyError:
pass
# List of feature names
features_names = list(dfs[0])
# Count number of NaN for each feature
for i, df in enumerate(dfs):
print("File {}".format(i))
for feature in features_names:
print("Feature {} has {} NaNs".format(feature,
df[feature].isnull().sum()))
print("=======================")
# Convert to numpy arrays
# Features
X = list()
for df in dfs:
X.append(np.array(df[list(features_names)].values, dtype=float))
X = np.vstack(X)
# Responses
y = np.zeros(len(X))
y[len(dfs[0]):] = np.ones(len(X) - len(dfs[0]))
df = pd.concat(dfs)
df['variable'] = y
return X, y, df, features_names, delta
def load_data_tgt(fname, names, names_to_delete, delta):
"""
Function that loads target data for classification.
:param fname:
Target data file.
:param names:
Names of columns in files.
:param names_to_delete:
Column names to delete.
:return:
X, ``sklearn`` array of features, list of feature names
"""
# Load data
df = pd.read_table(fname, names=names, engine='python', na_values='+inf',
sep=r"\s*", usecols=range(30))
for name in names_to_delete:
del df[name]
try:
shift_log_transform(df, 'CSSD', -delta + 0.1)
except KeyError:
pass
# List of feature names
features_names = list(df)
# Count number of NaN for each feature
for feature in features_names:
print("Feature {} has {} NaNs".format(feature,
df[feature].isnull().sum()))
print("=======================")
# Convert to numpy arrays
# Features
X = np.array(df[list(features_names)].values, dtype=float)
# Original data
df = pd.read_table(fname, names=names, engine='python', na_values='+inf',
sep=r"\s*", usecols=range(30))
return X, features_names, df
remote = callbacks.RemoteMonitor(root='http://localhost:9000')
# fix random seed for reproducibility
seed = 1
numpy.random.seed(seed)
# load dataset
data_dir = '/home/ilya/code/ml4vs/data/dataset_OGLE/indexes_normalized'
file_1 = 'vast_lightcurve_statistics_normalized_variables_only.log'
file_0 = 'vast_lightcurve_statistics_normalized_constant_only.log'
file_0 = os.path.join(data_dir, file_0)
file_1 = os.path.join(data_dir, file_1)
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
names_to_delete = ['Magnitude', 'meaningless_1', 'meaningless_2', 'star_ID',
'Npts']
X, y, df, feature_names, delta = load_data([file_0, file_1], names, names_to_delete)
n_cv_iter = 5
def create_baseline():
# create model
model = Sequential()
model.add(Dense(25, input_dim=25, init='normal', activation='relu',
W_constraint=maxnorm(3)))
model.add(Dropout(0.1))
model.add(Dense(25, init='normal', activation='relu',
W_constraint=maxnorm(3)))
model.add(Dropout(0.1))
model.add(Dense(13, init='normal', activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1, init='normal', activation='sigmoid'))
# Compile model
learning_rate = 0.1
decay_rate = learning_rate / epochs
momentum = 0.90
sgd = SGD(lr=learning_rate, decay=decay_rate, momentum=momentum,
nesterov=False)
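    # Note: `epochs` refers to the module-level variable defined further below; it is
    # looked up when KerasClassifier actually builds the model, not when this function
    # is defined.  With a non-zero decay, Keras' SGD shrinks the step size over time,
    # roughly lr_t = lr / (1 + decay * t) where t counts parameter updates.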
model.compile(loss='binary_crossentropy', optimizer=sgd,
metrics=['accuracy'])
return model
epochs = 50
# epochs = 125
batch_size = 12
estimators = list()
estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median',
axis=0, verbose=2)))
estimators.append(('scaler', StandardScaler()))
estimators.append(('mlp', KerasClassifier(build_fn=create_baseline,
nb_epoch=epochs,
batch_size=batch_size,
verbose=0)))
skf = StratifiedKFold(y, n_folds=4, shuffle=True, random_state=seed)
pipeline = Pipeline(estimators)
results = cross_val_score(pipeline, X, y, cv=skf, scoring='f1', n_jobs=3)
print("\n")
print(results)
print("\n")
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
print("\n")
results = cross_val_score(pipeline, X, y, cv=skf, scoring='roc_auc', n_jobs=3)
print("\n")
print(results)
print("\n")
print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
print("\n")
# Load blind test data
file_tgt = 'LMC_SC19_PSF_Pgood98__vast_lightcurve_statistics_normalized.log'
file_tgt = os.path.join(data_dir, file_tgt)
X_tgt, feature_names, df = load_data_tgt(file_tgt, names, names_to_delete,
delta)
pipeline.fit(X, y, mlp__batch_size=batch_size, mlp__nb_epoch=epochs)
model = pipeline.named_steps['mlp']
y_pred = model.predict(X_tgt)
y_probs = model.predict_proba(X_tgt)
idx = y_probs[:, 1] > 0.5
idx_ = y_probs[:, 1] < 0.5
nns_no = list(df['star_ID'][idx_])
print("Found {} variables".format(np.count_nonzero(idx)))
with open('nn_results.txt', 'w') as fo:
for line in list(df['star_ID'][idx]):
fo.write(line + '\n')
# Found negatives
nns_no = set([line.strip().split('_')[4].split('.')[0] for line in nns_no])
with open('clean_list_of_new_variables.txt', 'r') as fo:
news = fo.readlines()
news = [line.strip().split(' ')[1] for line in news]
with open('nn_results.txt', 'r') as fo:
nns = fo.readlines()
nns = [line.strip().split('_')[4].split('.')[0] for line in nns]
nns = set(nns)
# New variables discovered by GBC
news = set(news)
# 11 new variables are found
len(news.intersection(nns))
# It was found
'181193' in nns
with open('candidates_50perc_threshold.txt', 'r') as fo:
c50 = fo.readlines()
c50 = [line.strip("\", ', \", \n, }, {") for line in c50]
with open('variables_not_in_catalogs.txt', 'r') as fo:
not_in_cat = fo.readlines()
nic = [line.strip().split(' ')[1] for line in not_in_cat]
# Catalogue variables
cat_vars = set(c50).difference(set(nic))
# Non-catalogue variable
noncat_vars = set([line.strip().split(' ')[1] for line in not_in_cat if 'CST' not in line])
# All variables
all_vars = news.union(cat_vars).union(noncat_vars)
# Number of true positives
# 145
len(all_vars.intersection(nns))
# Number of false negatives
# 43
len(nns_no.intersection(all_vars))
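# From the counts quoted above (145 true positives, 43 false negatives among the known
# variables) the recall of the network on this field works out to roughly
#     recall = TP / (TP + FN) = 145 / (145 + 43) ~ 0.77
# (illustrative arithmetic only; the real numbers are whatever the len() calls return).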
# # Check overfitting
# sss = StratifiedShuffleSplit(y, n_iter=1, test_size=1. / n_cv_iter,
# random_state=seed)
# for train_index, test_index in sss:
# X_train, X_test = X[train_index], X[test_index]
# y_train, y_test = y[train_index], y[test_index]
#
# import keras
# history = keras.callbacks.History()
# print("Fitting...")
# X_test_ = X_test.copy()
# X_train_ = X_train.copy()
# for name, transform in pipeline.steps[:-1]:
# print(name, transform)
# transform.fit(X_train_)
# X_test_ = transform.transform(X_test_)
# X_train_ = transform.transform(X_train_)
# pipeline.fit(X_train, y_train, mlp__validation_data=(X_test_, y_test),
# mlp__batch_size=batch_size, mlp__nb_epoch=epochs,
# mlp__callbacks=[history])
# model = pipeline.named_steps['mlp']
#
# y_pred = model.predict(X_test_)
# y_pred[y_pred < 0.5] = 0.
# y_pred[y_pred >= 0.5] = 1.
# y_probs = model.predict_proba(X_test_)
# cm = confusion_matrix(y_test, y_pred)
# print_cm_summary(cm)
#
# plt.plot(history.history['acc'])
# plt.plot(history.history['val_acc'])
# plt.title('model accuracy')
# plt.ylabel('accuracy')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
# # summarize history for loss
# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
# # Build several cm
# skf = StratifiedKFold(y, n_folds=4, shuffle=True, random_state=seed)
# for train_index, test_index in skf:
# X_train, X_test = X[train_index], X[test_index]
# y_train, y_test = y[train_index], y[test_index]
#
# import keras
# history = keras.callbacks.History()
# print("Fitting...")
# X_test_ = X_test.copy()
# X_train_ = X_train.copy()
# estimators = list()
# estimators.append(('imputer', Imputer(missing_values='NaN', strategy='mean',
# axis=0, verbose=2)))
# estimators.append(('scaler', StandardScaler()))
# estimators.append(('mlp', KerasClassifier(build_fn=create_baseline,
# nb_epoch=epochs,
# batch_size=batch_size,
# verbose=0)))
# pipeline = Pipeline(estimators)
# for name, transform in pipeline.steps[:-1]:
# print(name, transform)
# transform.fit(X_train_)
# X_test_ = transform.transform(X_test_)
# X_train_ = transform.transform(X_train_)
# pipeline.fit(X_train, y_train, mlp__validation_data=(X_test_, y_test),
# mlp__batch_size=batch_size, mlp__nb_epoch=epochs,
# mlp__callbacks=[history])
# model = pipeline.named_steps['mlp']
#
# y_pred = model.predict(X_test_)
# y_pred[y_pred < 0.5] = 0.
# y_pred[y_pred >= 0.5] = 1.
# y_probs = model.predict_proba(X_test_)
# cm = confusion_matrix(y_test, y_pred)
# print_cm_summary(cm)
#
# # summarize history for loss
# plt.plot(history.history['acc'])
# plt.plot(history.history['val_acc'])
# plt.title('model accuracy')
# plt.ylabel('accuracy')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
| mit |
haraldschilly/smc | src/smc_sagews/smc_sagews/sage_server.py | 3 | 69396 | #!/usr/bin/env python
"""
sage_server.py -- unencrypted forking TCP server.
Note: I wrote functionality so this can run as root, create accounts on the fly,
and serve sage as those accounts. Doing this is horrendous from a security point of
view, and I'm definitely not doing this. None of that functionality is actually
used in https://cloud.sagemath.com!
For debugging, this may help:
killemall sage_server.py && sage --python sage_server.py -p 6000
"""
# NOTE: This file is GPL'd
# because it imports the Sage library. This file is not directly
# imported by anything else in Salvus; the Python process it runs is
# used over a TCP connection.
#########################################################################################
# Copyright (C) 2013 William Stein <[email protected]> #
# #
# Distributed under the terms of the GNU General Public License (GPL), version 2+ #
# #
# http://www.gnu.org/licenses/ #
#########################################################################################
# Add the path that contains this file to the Python load path, so we
# can import other files from there.
import os, sys, time
# used for clearing pylab figure
pylab = None
# Maximum number of distinct (non-once) output messages per cell; when this number is
# exceeded, an exception is raised; this reduces the chances of the user creating
# a huge unusable worksheet.
MAX_OUTPUT_MESSAGES = 256
# stdout, stderr, html, etc. that exceeds this many characters will be truncated to avoid
# killing the client.
MAX_STDOUT_SIZE = MAX_STDERR_SIZE = MAX_CODE_SIZE = MAX_HTML_SIZE = MAX_MD_SIZE = MAX_TEX_SIZE = 40000
MAX_OUTPUT = 150000
# We import the notebook interact, which we will monkey patch below,
# first, since importing later causes trouble in sage>=5.6.
import sagenb.notebook.interact
# Standard imports.
import json, resource, shutil, signal, socket, struct, \
tempfile, time, traceback, pwd
import sage_parsing, sage_salvus
uuid = sage_salvus.uuid
def unicode8(s):
# I evidently don't understand Python unicode... Do the following for now:
# TODO: see http://stackoverflow.com/questions/21897664/why-does-unicodeu-passed-an-errors-parameter-raise-typeerror for how to fix.
try:
return unicode(s, 'utf8')
except:
try:
return unicode(s)
except:
return s
LOGFILE = os.path.realpath(__file__)[:-3] + ".log"
PID = os.getpid()
from datetime import datetime
def log(*args):
#print "logging to %s"%LOGFILE
try:
debug_log = open(LOGFILE, 'a')
mesg = "%s (%s): %s\n"%(PID, datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3], ' '.join([unicode8(x) for x in args]))
debug_log.write(mesg)
debug_log.flush()
except:
log("an error writing a log message (ignoring)")
# Determine the info object, if available. There's no good reason
# it wouldn't be available, unless a user explicitly deleted it, but
# we may as well try to be robust to this, especially if somebody
# were to try to use this server outside of cloud.sagemath.com.
_info_path = os.path.join(os.environ['SMC'], 'info.json')
if os.path.exists(_info_path):
INFO = json.loads(open(_info_path).read())
else:
INFO = {}
if 'base_url' not in INFO:
INFO['base_url'] = ''
# Configure logging
#logging.basicConfig()
#log = logging.getLogger('sage_server')
#log.setLevel(logging.INFO)
# A CoffeeScript version of this function is in misc_node.coffee.
import hashlib
def uuidsha1(data):
sha1sum = hashlib.sha1()
sha1sum.update(data)
s = sha1sum.hexdigest()
t = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'
r = list(t)
j = 0
for i in range(len(t)):
if t[i] == 'x':
r[i] = s[j]; j += 1
elif t[i] == 'y':
# take 8 + low order 3 bits of hex number.
r[i] = hex( (int(s[j],16)&0x3) |0x8)[-1]; j += 1
return ''.join(r)
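# The mapping above is deterministic: the same blob always yields the same UUID-shaped
# string (8-4-4-4-12 hex groups taken from the SHA-1 hexdigest, with the version nibble
# forced to '4' and the variant nibble drawn from 8/9/a/b), so identical content always
# gets an identical identifier.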
# A tcp connection with support for sending various types of messages, especially JSON.
class ConnectionJSON(object):
def __init__(self, conn):
assert not isinstance(conn, ConnectionJSON) # avoid common mistake -- conn is supposed to be from socket.socket...
self._conn = conn
def close(self):
self._conn.close()
def _send(self, s):
length_header = struct.pack(">L", len(s))
self._conn.send(length_header + s)
def send_json(self, m):
m = json.dumps(m)
        log(u"sending message '", truncate_text(m, 256)[0], u"'")
self._send('j' + m)
return len(m)
def send_blob(self, blob):
s = uuidsha1(blob)
self._send('b' + s + blob)
return s
def send_file(self, filename):
log("sending file '%s'"%filename)
f = open(filename, 'rb')
data = f.read()
f.close()
return self.send_blob(data)
def _recv(self, n):
#print "_recv(%s)"%n
for i in range(20): # see http://stackoverflow.com/questions/3016369/catching-blocking-sigint-during-system-call
try:
#print "blocking recv (i = %s), pid=%s"%(i, os.getpid())
r = self._conn.recv(n)
#log("n=%s; received: '%s' of len %s"%(n,r, len(r)))
return r
except socket.error as (errno, msg):
#print "socket.error, msg=%s"%msg
if errno != 4:
raise
raise EOFError
def recv(self):
n = self._recv(4)
if len(n) < 4:
raise EOFError
n = struct.unpack('>L', n)[0] # big endian 32 bits
s = self._recv(n)
while len(s) < n:
t = self._recv(n - len(s))
if len(t) == 0:
raise EOFError
s += t
if s[0] == 'j':
try:
return 'json', json.loads(s[1:])
except Exception, msg:
log("Unable to parse JSON '%s'"%s[1:])
raise
elif s[0] == 'b':
return 'blob', s[1:]
raise ValueError("unknown message type '%s'"%s[0])
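# Wire format used by ConnectionJSON above: each frame is a 4-byte big-endian length N
# followed by N bytes, where the first byte is a type tag -- 'j' for a JSON message, or
# 'b' for a blob whose next 36 bytes are its uuidsha1 -- and the rest is the payload.
# Illustrative sketch of what a JSON frame looks like on the wire (not used below):
#
#     payload = 'j' + json.dumps({'event': 'start_session'})
#     frame = struct.pack('>L', len(payload)) + payload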
TRUNCATE_MESG = "WARNING: Output truncated. Type 'smc?' to learn how to raise the output limit."
def truncate_text(s, max_size):
if len(s) > max_size:
return s[:max_size] + "[...]", True
else:
return s, False
class Message(object):
def _new(self, event, props={}):
m = {'event':event}
for key, val in props.iteritems():
if key != 'self':
m[key] = val
return m
def start_session(self):
return self._new('start_session')
def session_description(self, pid):
return self._new('session_description', {'pid':pid})
def send_signal(self, pid, signal=signal.SIGINT):
return self._new('send_signal', locals())
def terminate_session(self, done=True):
return self._new('terminate_session', locals())
def execute_code(self, id, code, preparse=True):
return self._new('execute_code', locals())
def execute_javascript(self, code, obj=None, coffeescript=False):
return self._new('execute_javascript', locals())
def output(self, id,
stdout = None,
stderr = None,
code = None,
html = None,
javascript = None,
coffeescript = None,
interact = None,
md = None,
tex = None,
d3 = None,
file = None,
raw_input = None,
obj = None,
done = None,
once = None,
hide = None,
show = None,
auto = None,
events = None,
clear = None,
delete_last = None):
m = self._new('output')
m['id'] = id
t = truncate_text
did_truncate = False
import sage_server # we do this so that the user can customize the MAX's below.
if code is not None:
code['source'], did_truncate = t(code['source'], sage_server.MAX_CODE_SIZE)
m['code'] = code
if stderr is not None and len(stderr) > 0:
m['stderr'], did_truncate = t(stderr, sage_server.MAX_STDERR_SIZE)
if stdout is not None and len(stdout) > 0:
m['stdout'], did_truncate = t(stdout, sage_server.MAX_STDOUT_SIZE)
if html is not None and len(html) > 0:
m['html'], did_truncate = t(html, sage_server.MAX_HTML_SIZE)
if md is not None and len(md) > 0:
m['md'], did_truncate = t(md, sage_server.MAX_MD_SIZE)
if tex is not None and len(tex)>0:
tex['tex'], did_truncate = t(tex['tex'], sage_server.MAX_TEX_SIZE)
m['tex'] = tex
if javascript is not None: m['javascript'] = javascript
if coffeescript is not None: m['coffeescript'] = coffeescript
if interact is not None: m['interact'] = interact
if d3 is not None: m['d3'] = d3
if obj is not None: m['obj'] = json.dumps(obj)
if file is not None: m['file'] = file # = {'filename':..., 'uuid':...}
if raw_input is not None: m['raw_input'] = raw_input
if done is not None: m['done'] = done
if once is not None: m['once'] = once
if hide is not None: m['hide'] = hide
if show is not None: m['show'] = show
if auto is not None: m['auto'] = auto
if events is not None: m['events'] = events
if clear is not None: m['clear'] = clear
if delete_last is not None: m['delete_last'] = delete_last
if did_truncate:
if 'stderr' in m:
m['stderr'] += '\n' + TRUNCATE_MESG
else:
m['stderr'] = '\n' + TRUNCATE_MESG
return m
def introspect_completions(self, id, completions, target):
m = self._new('introspect_completions', locals())
m['id'] = id
return m
def introspect_docstring(self, id, docstring, target):
m = self._new('introspect_docstring', locals())
m['id'] = id
return m
def introspect_source_code(self, id, source_code, target):
m = self._new('introspect_source_code', locals())
m['id'] = id
return m
message = Message()
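# The helpers above just build plain JSON-serializable dicts keyed by 'event'.
# For example (illustrative):
#     message.execute_code(id=0, code='2+2')
#         -> {'event': 'execute_code', 'id': 0, 'code': '2+2', 'preparse': True}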
whoami = os.environ['USER']
def client1(port, hostname):
conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect((hostname, int(port)))
conn = ConnectionJSON(conn)
conn.send_json(message.start_session())
typ, mesg = conn.recv()
pid = mesg['pid']
print "PID = %s"%pid
id = 0
while True:
try:
code = sage_parsing.get_input('sage [%s]: '%id)
if code is None: # EOF
break
conn.send_json(message.execute_code(code=code, id=id))
while True:
typ, mesg = conn.recv()
if mesg['event'] == 'terminate_session':
return
elif mesg['event'] == 'output':
if 'stdout' in mesg:
sys.stdout.write(mesg['stdout']); sys.stdout.flush()
if 'stderr' in mesg:
print '! ' + '\n! '.join(mesg['stderr'].splitlines())
if 'done' in mesg and mesg['id'] >= id:
break
id += 1
except KeyboardInterrupt:
print "Sending interrupt signal"
conn2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn2.connect((hostname, int(port)))
conn2 = ConnectionJSON(conn2)
conn2.send_json(message.send_signal(pid))
del conn2
id += 1
conn.send_json(message.terminate_session())
print "\nExiting Sage client."
class BufferedOutputStream(object):
def __init__(self, f, flush_size=4096, flush_interval=.1):
self._f = f
self._buf = ''
self._flush_size = flush_size
self._flush_interval = flush_interval
self.reset()
def reset(self):
self._last_flush_time = time.time()
def fileno(self):
return 0
def write(self, output):
self._buf += output
#self.flush()
t = time.time()
if ((len(self._buf) >= self._flush_size) or
(t - self._last_flush_time >= self._flush_interval)):
self.flush()
self._last_flush_time = t
def flush(self, done=False):
if not self._buf and not done:
# no point in sending an empty message
return
self._f(self._buf, done=done)
self._buf = ''
def isatty(self):
return False
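# BufferedOutputStream batches writes so that a tight loop of prints does not produce
# one output message per character: the buffer is handed to the callback once roughly
# flush_size (default 4096) characters have accumulated, or once flush_interval
# (default 0.1s) has passed since the last flush, whichever happens first.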
# This will *have* to be re-done using Cython for speed.
class Namespace(dict):
def __init__(self, x):
self._on_change = {}
self._on_del = {}
dict.__init__(self, x)
def on(self, event, x, f):
if event == 'change':
if x not in self._on_change:
self._on_change[x] = []
self._on_change[x].append(f)
elif event == 'del':
if x not in self._on_del:
self._on_del[x] = []
self._on_del[x].append(f)
def remove(self, event, x, f):
if event == 'change' and self._on_change.has_key(x):
v = self._on_change[x]
            i = v.index(f) if f in v else -1
if i != -1:
del v[i]
if len(v) == 0:
del self._on_change[x]
elif event == 'del' and self._on_del.has_key(x):
v = self._on_del[x]
            i = v.index(f) if f in v else -1
if i != -1:
del v[i]
if len(v) == 0:
del self._on_del[x]
def __setitem__(self, x, y):
dict.__setitem__(self, x, y)
try:
if self._on_change.has_key(x):
for f in self._on_change[x]:
f(y)
if self._on_change.has_key(None):
for f in self._on_change[None]:
f(x, y)
except Exception, mesg:
print mesg
def __delitem__(self, x):
try:
if self._on_del.has_key(x):
for f in self._on_del[x]:
f()
if self._on_del.has_key(None):
for f in self._on_del[None]:
f(x)
except Exception, mesg:
print mesg
dict.__delitem__(self, x)
def set(self, x, y, do_not_trigger=None):
dict.__setitem__(self, x, y)
if self._on_change.has_key(x):
if do_not_trigger is None:
do_not_trigger = []
for f in self._on_change[x]:
if f not in do_not_trigger:
f(y)
if self._on_change.has_key(None):
for f in self._on_change[None]:
f(x,y)
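# Illustrative sketch of the Namespace change/del hooks defined above (not executed
# here; `ns` and `on_x_change` are hypothetical names):
#
#     ns = Namespace({})
#     def on_x_change(val): print "x is now", val
#     ns.on('change', 'x', on_x_change)
#     ns['x'] = 5          # fires on_x_change(5)
#     del ns['x']          # fires any 'del' handlers registered for 'x'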
class TemporaryURL:
def __init__(self, url, ttl):
self.url = url
self.ttl = ttl
def __repr__(self):
return repr(self.url)
def __str__(self):
return self.url
namespace = Namespace({})
class Salvus(object):
"""
Cell execution state object and wrapper for access to special SageMathCloud functionality.
An instance of this object is created each time you execute a cell. It has various methods
for sending different types of output messages, links to files, etc. Type 'help(smc)' for
more details.
OUTPUT LIMITATIONS -- There is an absolute limit on the number of messages output for a given
    cell, and also on the size of each output message.  You can view or change
    those limits dynamically in a worksheet via any of the following variables::
sage_server.MAX_STDOUT_SIZE # max length of each stdout output message
sage_server.MAX_STDERR_SIZE # max length of each stderr output message
sage_server.MAX_MD_SIZE # max length of each md (markdown) output message
sage_server.MAX_HTML_SIZE # max length of each html output message
sage_server.MAX_TEX_SIZE # max length of tex output message
sage_server.MAX_OUTPUT_MESSAGES # max number of messages output for a cell.
And::
sage_server.MAX_OUTPUT # max total character output for a single cell; computation
# terminated/truncated if sum of above exceeds this.
"""
Namespace = Namespace
_prefix = ''
_postfix = ''
_default_mode = 'sage'
def _flush_stdio(self):
"""
Flush the standard output streams. This should be called before sending any message
that produces output.
"""
sys.stdout.flush()
sys.stderr.flush()
def __repr__(self):
return ''
def __init__(self, conn, id, data=None, cell_id=None, message_queue=None):
self._conn = conn
self._num_output_messages = 0
self._total_output_length = 0
self._output_warning_sent = False
self._id = id
self._done = True # done=self._done when last execute message is sent; e.g., set self._done = False to not close cell on code term.
self.data = data
self.cell_id = cell_id
self.namespace = namespace
self.message_queue = message_queue
self.code_decorators = [] # gets reset if there are code decorators
# Alias: someday remove all references to "salvus" and instead use smc.
# For now this alias is easier to think of and use.
namespace['smc'] = namespace['salvus'] = self # beware of circular ref?
# Monkey patch in our "require" command.
namespace['require'] = self.require
# Make the salvus object itself available when doing "from sage.all import *".
import sage.all
sage.all.salvus = self
def _send_output(self, *args, **kwds):
if self._output_warning_sent:
raise KeyboardInterrupt
mesg = message.output(*args, **kwds)
if not mesg.get('once',False):
self._num_output_messages += 1
import sage_server
if self._num_output_messages > sage_server.MAX_OUTPUT_MESSAGES:
self._output_warning_sent = True
err = "\nToo many output messages (at most %s per cell -- type 'smc?' to learn how to raise this limit): attempting to terminate..."%sage_server.MAX_OUTPUT_MESSAGES
self._conn.send_json(message.output(stderr=err, id=self._id, once=False, done=True))
raise KeyboardInterrupt
n = self._conn.send_json(mesg)
self._total_output_length += n
if self._total_output_length > sage_server.MAX_OUTPUT:
self._output_warning_sent = True
            err = "\nOutput too long -- MAX_OUTPUT (=%s) exceeded (type 'smc?' to learn how to raise this limit): attempting to terminate..."%sage_server.MAX_OUTPUT
self._conn.send_json(message.output(stderr=err, id=self._id, once=False, done=True))
raise KeyboardInterrupt
def obj(self, obj, done=False):
self._send_output(obj=obj, id=self._id, done=done)
return self
def link(self, filename, label=None, foreground=True, cls=''):
"""
Output a clickable link to a file somewhere in this project. The filename
path must be relative to the current working directory of the Python process.
The simplest way to use this is
salvus.link("../name/of/file") # any relative path to any file
This creates a link, which when clicked on, opens that file in the foreground.
If the filename is the name of a directory, clicking will instead
open the file browser on that directory:
salvus.link("../name/of/directory") # clicking on the resulting link opens a directory
If you would like a button instead of a link, pass cls='btn'. You can use any of
the standard Bootstrap button classes, e.g., btn-small, btn-large, btn-success, etc.
If you would like to change the text in the link (or button) to something
besides the default (filename), just pass arbitrary HTML to the label= option.
INPUT:
- filename -- a relative path to a file or directory
- label -- (default: the filename) html label for the link
- foreground -- (default: True); if True, opens link in the foreground
- cls -- (default: '') optional CSS classes, such as 'btn'.
EXAMPLES:
Use as a line decorator::
%salvus.link name/of/file.foo
Make a button::
salvus.link("foo/bar/", label="The Bar Directory", cls='btn')
Make two big blue buttons with plots in them::
plot(sin, 0, 20).save('sin.png')
plot(cos, 0, 20).save('cos.png')
for img in ['sin.png', 'cos.png']:
salvus.link(img, label="<img width='150px' src='%s'>"%salvus.file(img, show=False), cls='btn btn-large btn-primary')
"""
path = os.path.abspath(filename)[len(os.environ['HOME'])+1:]
if label is None:
label = filename
id = uuid()
self.html("<a class='%s' style='cursor:pointer'; id='%s'></a>"%(cls, id))
s = "$('#%s').html(obj.label).click(function() {%s; return false;});"%(id, self._action(path, foreground))
self.javascript(s, obj={'label':label, 'path':path, 'foreground':foreground}, once=False)
def _action(self, path, foreground):
if os.path.isdir(path):
action = "worksheet.project_page.chdir(obj.path);"
if foreground:
action += "worksheet.project_page.display_tab('project-file-listing');"
else:
action = "worksheet.project_page.open_file({'path':obj.path, 'foreground': obj.foreground});"
return action
def open_tab(self, filename, foreground=True):
"""
Open a new file (or directory) document in another tab.
See the documentation for salvus.link.
"""
path = os.path.abspath(filename)[len(os.environ['HOME'])+1:]
self.javascript(self._action(path, foreground),
obj = {'path':path, 'foreground':foreground}, once=True)
def close_tab(self, filename):
"""
        Close an open file tab. The filename is relative to the current working directory.
"""
self.javascript("worksheet.editor.close(obj)", obj = filename, once=True)
def threed(self,
g, # sage Graphic3d object.
width = None,
height = None,
frame = True, # True/False or {'color':'black', 'thickness':.4, 'labels':True, 'fontsize':14, 'draw':True,
# 'xmin':?, 'xmax':?, 'ymin':?, 'ymax':?, 'zmin':?, 'zmax':?}
background = None,
foreground = None,
spin = False,
aspect_ratio = None,
frame_aspect_ratio = None, # synonym for aspect_ratio
done = False,
renderer = None, # None, 'webgl', or 'canvas'
):
from graphics import graphics3d_to_jsonable, json_float as f
# process options, combining ones set explicitly above with ones inherited from 3d scene
opts = { 'width':width, 'height':height,
'background':background, 'foreground':foreground,
'spin':spin, 'aspect_ratio':aspect_ratio,
'renderer':renderer}
extra_kwds = {} if g._extra_kwds is None else g._extra_kwds
# clean up and normalize aspect_ratio option
if aspect_ratio is None:
if frame_aspect_ratio is not None:
aspect_ratio = frame_aspect_ratio
elif 'frame_aspect_ratio' in extra_kwds:
aspect_ratio = extra_kwds['frame_aspect_ratio']
elif 'aspect_ratio' in extra_kwds:
aspect_ratio = extra_kwds['aspect_ratio']
if aspect_ratio is not None:
if aspect_ratio == 1 or aspect_ratio == "automatic":
aspect_ratio = None
elif not (isinstance(aspect_ratio, (list, tuple)) and len(aspect_ratio) == 3):
raise TypeError("aspect_ratio must be None, 1 or a 3-tuple, but it is '%s'"%(aspect_ratio,))
else:
aspect_ratio = [f(x) for x in aspect_ratio]
opts['aspect_ratio'] = aspect_ratio
for k in ['spin', 'height', 'width', 'background', 'foreground', 'renderer']:
if k in extra_kwds and not opts.get(k,None):
opts[k] = extra_kwds[k]
if not isinstance(opts['spin'], bool):
opts['spin'] = f(opts['spin'])
opts['width'] = f(opts['width'])
opts['height'] = f(opts['height'])
# determine the frame
b = g.bounding_box()
xmin, xmax, ymin, ymax, zmin, zmax = b[0][0], b[1][0], b[0][1], b[1][1], b[0][2], b[1][2]
fr = opts['frame'] = {'xmin':f(xmin), 'xmax':f(xmax),
'ymin':f(ymin), 'ymax':f(ymax),
'zmin':f(zmin), 'zmax':f(zmax)}
if isinstance(frame, dict):
for k in fr.keys():
if k in frame:
fr[k] = f(frame[k])
fr['draw'] = frame.get('draw', True)
fr['color'] = frame.get('color', None)
fr['thickness'] = f(frame.get('thickness', None))
fr['labels'] = frame.get('labels', None)
if 'fontsize' in frame:
fr['fontsize'] = int(frame['fontsize'])
elif isinstance(frame, bool):
fr['draw'] = frame
# convert the Sage graphics object to a JSON object that can be rendered
scene = {'opts' : opts,
'obj' : graphics3d_to_jsonable(g)}
# Store that object in the database, rather than sending it directly as an output message.
# We do this since obj can easily be quite large/complicated, and managing it as part of the
# document is too slow and doesn't scale.
blob = json.dumps(scene, separators=(',', ':'))
uuid = self._conn.send_blob(blob)
# flush output (so any text appears before 3d graphics, in case they are interleaved)
self._flush_stdio()
# send message pointing to the 3d 'file', which will get downloaded from database
self._send_output(id=self._id, file={'filename':unicode8("%s.sage3d"%uuid), 'uuid':uuid}, done=done)
def d3_graph(self, g, **kwds):
from graphics import graph_to_d3_jsonable
self._send_output(id=self._id, d3={"viewer":"graph", "data":graph_to_d3_jsonable(g, **kwds)})
def file(self, filename, show=True, done=False, download=False, once=False, events=None, raw=False):
"""
Display or provide a link to the given file. Raises a RuntimeError if this
        is not possible, e.g., if the file is too large.
If show=True (the default), the browser will show the file,
or provide a clickable link to it if there is no way to show it.
If show=False, this function returns an object T such that
T.url (or str(t)) is a string of the form "/blobs/filename?uuid=the_uuid"
that can be used to access the file even if the file is immediately
deleted after calling this function (the file is stored in a database).
Also, T.ttl is the time to live (in seconds) of the object. A ttl of
0 means the object is permanently available.
raw=False (the default):
If you use the URL
/blobs/filename?uuid=the_uuid&download
then the server will include a header that tells the browser to
download the file to disk instead of displaying it. Only relatively
small files can be made available this way. However, they remain
available (for a day) even *after* the file is deleted.
NOTE: It is safe to delete the file immediately after this
function (salvus.file) returns.
raw=True:
Instead, the URL is to the raw file, which is served directly
from the project:
/project-id/raw/path/to/filename
This will only work if the file is not deleted; however, arbitrarily
large files can be streamed this way.
This function creates an output message {file:...}; if the user saves
a worksheet containing this message, then any referenced blobs are made
permanent in the database.
The uuid is based on the Sha-1 hash of the file content (it is computed using the
function sage_server.uuidsha1). Any two files with the same content have the
same Sha1 hash.
"""
filename = unicode8(filename)
if raw:
info = self.project_info()
path = os.path.abspath(filename)
home = os.environ[u'HOME'] + u'/'
if path.startswith(home):
path = path[len(home):]
else:
raise ValueError(u"can only send raw files in your home directory")
url = os.path.join(u'/',info['base_url'].strip('/'), info['project_id'], u'raw', path.lstrip('/'))
if show:
self._flush_stdio()
self._send_output(id=self._id, once=once, file={'filename':filename, 'url':url, 'show':show}, events=events, done=done)
return
else:
return TemporaryURL(url=url, ttl=0)
file_uuid = self._conn.send_file(filename)
mesg = None
while mesg is None:
self.message_queue.recv()
for i, (typ, m) in enumerate(self.message_queue.queue):
if typ == 'json' and m.get('event') == 'save_blob' and m.get('sha1') == file_uuid:
mesg = m
del self.message_queue[i]
break
if 'error' in mesg:
raise RuntimeError("error saving blob -- %s"%mesg['error'])
self._flush_stdio()
self._send_output(id=self._id, once=once, file={'filename':filename, 'uuid':file_uuid, 'show':show}, events=events, done=done)
if not show:
info = self.project_info()
url = u"%s/blobs/%s?uuid=%s"%(info['base_url'], filename, file_uuid)
if download:
                url += u'&download'
return TemporaryURL(url=url, ttl=mesg.get('ttl',0))
def default_mode(self, mode=None):
"""
Set the default mode for cell evaluation. This is equivalent
to putting %mode at the top of any cell that does not start
with %. Use salvus.default_mode() to return the current mode.
Use salvus.default_mode("") to have no default mode.
This is implemented using salvus.cell_prefix.
"""
if mode is None:
return Salvus._default_mode
Salvus._default_mode = mode
if mode == "sage":
self.cell_prefix("")
else:
self.cell_prefix("%" + mode)
def cell_prefix(self, prefix=None):
"""
Make it so that the given prefix code is textually
        prepended to the input before evaluating any cell, unless
the first character of the cell is a %.
To append code at the end, use cell_postfix.
INPUT:
- ``prefix`` -- None (to return prefix) or a string ("" to disable)
EXAMPLES:
Make it so every cell is timed:
salvus.cell_prefix('%time')
Make it so cells are typeset using latex, and latex comments are allowed even
as the first line.
salvus.cell_prefix('%latex')
%sage salvus.cell_prefix('')
Evaluate each cell using GP (Pari) and display the time it took:
salvus.cell_prefix('%time\n%gp')
%sage salvus.cell_prefix('') # back to normal
"""
if prefix is None:
return Salvus._prefix
else:
Salvus._prefix = prefix
def cell_postfix(self, postfix=None):
"""
Make it so that the given code is textually
appended to the input before evaluating a cell.
To prepend code at the beginning, use cell_prefix.
INPUT:
- ``postfix`` -- None (to return postfix) or a string ("" to disable)
EXAMPLES:
Print memory usage after evaluating each cell:
salvus.cell_postfix('print "%s MB used"%int(get_memory_usage())')
Return to normal
            salvus.cell_postfix('')
"""
if postfix is None:
return Salvus._postfix
else:
Salvus._postfix = postfix
def execute(self, code, namespace=None, preparse=True, locals=None):
if namespace is None:
namespace = self.namespace
# clear pylab figure (takes a few microseconds)
if pylab is not None:
pylab.clf()
#code = sage_parsing.strip_leading_prompts(code) # broken -- wrong on "def foo(x):\n print x"
blocks = sage_parsing.divide_into_blocks(code)
for start, stop, block in blocks:
if preparse:
block = sage_parsing.preparse_code(block)
sys.stdout.reset(); sys.stderr.reset()
try:
b = block.rstrip()
if b.endswith('??'):
p = sage_parsing.introspect(block,
namespace=namespace, preparse=False)
self.code(source = p['result'], mode = "python")
elif b.endswith('?'):
p = sage_parsing.introspect(block, namespace=namespace, preparse=False)
self.code(source = p['result'], mode = "text/x-rst")
else:
exec compile(block+'\n', '', 'single') in namespace, locals
sys.stdout.flush()
sys.stderr.flush()
except:
sys.stdout.flush()
sys.stderr.write('Error in lines %s-%s\n'%(start+1, stop+1))
traceback.print_exc()
sys.stderr.flush()
break
def execute_with_code_decorators(self, code_decorators, code, preparse=True, namespace=None, locals=None):
"""
salvus.execute_with_code_decorators is used when evaluating
code blocks that are set to any non-default code_decorator.
"""
import sage # used below as a code decorator
if isinstance(code_decorators, (str, unicode)):
code_decorators = [code_decorators]
if preparse:
code_decorators = map(sage_parsing.preparse_code, code_decorators)
code_decorators = [eval(code_decorator, self.namespace) for code_decorator in code_decorators]
# The code itself may want to know exactly what code decorators are in effect.
# For example, r.eval can do extra things when being used as a decorator.
self.code_decorators = code_decorators
for i, code_decorator in enumerate(code_decorators):
# eval is for backward compatibility
if not hasattr(code_decorator, 'eval') and hasattr(code_decorator, 'before'):
code_decorators[i] = code_decorator.before(code)
for code_decorator in reversed(code_decorators):
if hasattr(code_decorator, 'eval'): # eval is for backward compatibility
print code_decorator.eval(code, locals=self.namespace),
code = ''
elif code_decorator is sage:
# special case -- the sage module (i.e., %sage) should do nothing.
pass
else:
code = code_decorator(code)
if code is None:
code = ''
if code != '' and isinstance(code, (str, unicode)):
self.execute(code, preparse=preparse, namespace=namespace, locals=locals)
for code_decorator in code_decorators:
if not hasattr(code_decorator, 'eval') and hasattr(code_decorator, 'after'):
code_decorator.after(code)
def html(self, html, done=False, once=None):
"""
Display html in the output stream.
EXAMPLE:
salvus.html("<b>Hi</b>")
"""
self._flush_stdio()
self._send_output(html=unicode8(html), id=self._id, done=done, once=once)
def md(self, md, done=False, once=None):
"""
Display markdown in the output stream.
EXAMPLE:
salvus.md("**Hi**")
"""
self._flush_stdio()
self._send_output(md=unicode8(md), id=self._id, done=done, once=once)
def pdf(self, filename, **kwds):
sage_salvus.show_pdf(filename, **kwds)
def tex(self, obj, display=False, done=False, once=None, **kwds):
"""
Display obj nicely using TeX rendering.
INPUT:
        - obj -- latex string or object that is automatically converted to TeX
- display -- (default: False); if True, typeset as display math (so centered, etc.)
"""
self._flush_stdio()
tex = obj if isinstance(obj, str) else self.namespace['latex'](obj, **kwds)
self._send_output(tex={'tex':tex, 'display':display}, id=self._id, done=done, once=once)
return self
def start_executing(self):
self._send_output(done=False, id=self._id)
def clear(self, done=False):
self._send_output(clear=True, id=self._id, done=done)
def delete_last_output(self, done=False):
self._send_output(delete_last=True, id=self._id, done=done)
def stdout(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard output stream of the compute cell.
INPUT:
- output -- string or object
"""
stdout = output if isinstance(output, (str, unicode)) else unicode8(output)
self._send_output(stdout=stdout, done=done, id=self._id, once=once)
return self
def stderr(self, output, done=False, once=None):
"""
Send the string output (or unicode8(output) if output is not a
string) to the standard error stream of the compute cell.
INPUT:
- output -- string or object
"""
stderr = output if isinstance(output, (str, unicode)) else unicode8(output)
self._send_output(stderr=stderr, done=done, id=self._id, once=once)
return self
def code(self, source, # actual source code
mode = None, # the syntax highlight codemirror mode
filename = None, # path of file it is contained in (if applicable)
lineno = -1, # line number where source starts (0-based)
done=False, once=None):
"""
Send a code message, which is to be rendered as code by the client, with
appropriate syntax highlighting, maybe a link to open the source file, etc.
"""
source = source if isinstance(source, (str, unicode)) else unicode8(source)
code = {'source' : source,
'filename' : filename,
'lineno' : int(lineno),
'mode' : mode}
self._send_output(code=code, done=done, id=self._id, once=once)
return self
def _execute_interact(self, id, vals):
if id not in sage_salvus.interacts:
print "(Evaluate this cell to use this interact.)"
#raise RuntimeError, "Error: No interact with id %s"%id
else:
sage_salvus.interacts[id](vals)
def interact(self, f, done=False, once=None, **kwds):
I = sage_salvus.InteractCell(f, **kwds)
self._flush_stdio()
self._send_output(interact = I.jsonable(), id=self._id, done=done, once=once)
return sage_salvus.InteractFunction(I)
def javascript(self, code, once=False, coffeescript=False, done=False, obj=None):
"""
Execute the given Javascript code as part of the output
stream. This same code will be executed (at exactly this
point in the output stream) every time the worksheet is
rendered.
See the docs for the top-level javascript function for more details.
INPUT:
- code -- a string
        - once -- boolean (default: False); if True the Javascript is
only executed once, not every time the cell is loaded. This
is what you would use if you call salvus.stdout, etc. Use
once=False, e.g., if you are using javascript to make a DOM
element draggable (say). WARNING: If once=True, then the
javascript is likely to get executed before other output to
a given cell is even rendered.
- coffeescript -- boolean (default: False); if True, the input
code is first converted from CoffeeScript to Javascript.
At least the following Javascript objects are defined in the
scope in which the code is evaluated::
- cell -- jQuery wrapper around the current compute cell
- salvus.stdout, salvus.stderr, salvus.html, salvus.tex -- all
allow you to write additional output to the cell
- worksheet - jQuery wrapper around the current worksheet DOM object
- obj -- the optional obj argument, which is passed via JSON serialization
"""
if obj is None:
obj = {}
self._send_output(javascript={'code':code, 'coffeescript':coffeescript}, id=self._id, done=done, obj=obj, once=once)
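        # Example (illustrative): salvus.javascript("cell.css('background', obj.color)",
        # obj={'color': '#f5f5f5'}, once=False) runs in the browser with `cell` bound to
        # the current compute cell (a jQuery object) and `obj` deserialized from JSON.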
def coffeescript(self, *args, **kwds):
"""
This is the same as salvus.javascript, but with coffeescript=True.
See the docs for the top-level javascript function for more details.
"""
kwds['coffeescript'] = True
self.javascript(*args, **kwds)
def raw_input(self, prompt='', default='', placeholder='', input_width=None, label_width=None, done=False, type=None): # done is ignored here
self._flush_stdio()
m = {'prompt':unicode8(prompt)}
if input_width is not None:
m['input_width'] = unicode8(input_width)
if label_width is not None:
m['label_width'] = unicode8(label_width)
if default:
m['value'] = unicode8(default)
if placeholder:
m['placeholder'] = unicode8(placeholder)
self._send_output(raw_input=m, id=self._id)
typ, mesg = self.message_queue.next_mesg()
if typ == 'json' and mesg['event'] == 'codemirror_sage_raw_input':
# everything worked out perfectly
self.delete_last_output()
m['value'] = mesg['value'] # as unicode!
m['submitted'] = True
self._send_output(raw_input=m, id=self._id)
value = mesg['value']
if type is not None:
if type == 'sage':
value = sage_salvus.sage_eval(value)
else:
try:
value = type(value)
except TypeError:
# Some things in Sage are clueless about unicode for some reason...
# Let's at least try, in case the unicode can convert to a string.
value = type(str(value))
return value
else:
raise KeyboardInterrupt("raw_input interrupted by another action")
def _check_component(self, component):
if component not in ['input', 'output']:
raise ValueError("component must be 'input' or 'output'")
def hide(self, component):
"""
Hide the given component ('input' or 'output') of the cell.
"""
self._check_component(component)
self._send_output(self._id, hide=component)
def show(self, component):
"""
Show the given component ('input' or 'output') of the cell.
"""
self._check_component(component)
self._send_output(self._id, show=component)
def auto(self, state=True):
"""
        Set whether or not the current cell is automatically executed when
the Sage process restarts.
"""
self._send_output(self._id, auto=state)
def notify(self, **kwds):
"""
Display a graphical notification using the pnotify Javascript library.
INPUTS:
- `title: false` - The notice's title.
- `title_escape: false` - Whether to escape the content of the title. (Not allow HTML.)
- `text: false` - The notice's text.
- `text_escape: false` - Whether to escape the content of the text. (Not allow HTML.)
- `styling: "bootstrap"` - What styling classes to use. (Can be either jqueryui or bootstrap.)
- `addclass: ""` - Additional classes to be added to the notice. (For custom styling.)
- `cornerclass: ""` - Class to be added to the notice for corner styling.
- `nonblock: false` - Create a non-blocking notice. It lets the user click elements underneath it.
- `nonblock_opacity: .2` - The opacity of the notice (if it's non-blocking) when the mouse is over it.
- `history: true` - Display a pull down menu to redisplay previous notices, and place the notice in the history.
- `auto_display: true` - Display the notice when it is created. Turn this off to add notifications to the history without displaying them.
- `width: "300px"` - Width of the notice.
- `min_height: "16px"` - Minimum height of the notice. It will expand to fit content.
- `type: "notice"` - Type of the notice. "notice", "info", "success", or "error".
- `icon: true` - Set icon to true to use the default icon for the selected style/type, false for no icon, or a string for your own icon class.
- `animation: "fade"` - The animation to use when displaying and hiding the notice. "none", "show", "fade", and "slide" are built in to jQuery. Others require jQuery UI. Use an object with effect_in and effect_out to use different effects.
- `animate_speed: "slow"` - Speed at which the notice animates in and out. "slow", "def" or "normal", "fast" or number of milliseconds.
- `opacity: 1` - Opacity of the notice.
- `shadow: true` - Display a drop shadow.
- `closer: true` - Provide a button for the user to manually close the notice.
- `closer_hover: true` - Only show the closer button on hover.
- `sticker: true` - Provide a button for the user to manually stick the notice.
- `sticker_hover: true` - Only show the sticker button on hover.
- `hide: true` - After a delay, remove the notice.
- `delay: 8000` - Delay in milliseconds before the notice is removed.
- `mouse_reset: true` - Reset the hide timer if the mouse moves over the notice.
- `remove: true` - Remove the notice's elements from the DOM after it is removed.
- `insert_brs: true` - Change new lines to br tags.
"""
obj = {}
for k, v in kwds.iteritems():
obj[k] = sage_salvus.jsonable(v)
self.javascript("$.pnotify(obj)", once=True, obj=obj)
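        # Example (illustrative): salvus.notify(title="Done", text="Computation finished",
        # type="success", hide=True, delay=4000) pops up a success-styled notice that
        # disappears after 4 seconds; all keyword arguments are passed straight through
        # to $.pnotify as JSON.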
def execute_javascript(self, code, coffeescript=False, obj=None):
"""
Tell the browser to execute javascript. Basically the same as
salvus.javascript with once=True (the default), except this
isn't tied to a particular cell. There is a worksheet object
defined in the scope of the evaluation.
See the docs for the top-level javascript function for more details.
"""
self._conn.send_json(message.execute_javascript(code,
coffeescript=coffeescript, obj=json.dumps(obj,separators=(',', ':'))))
def execute_coffeescript(self, *args, **kwds):
"""
This is the same as salvus.execute_javascript, but with coffeescript=True.
See the docs for the top-level javascript function for more details.
"""
kwds['coffeescript'] = True
self.execute_javascript(*args, **kwds)
def _cython(self, filename, **opts):
"""
Return module obtained by compiling the Cython code in the
given file.
INPUT:
- filename -- name of a Cython file
- all other options are passed to sage.misc.cython.cython unchanged,
except for use_cache which defaults to True (instead of False)
OUTPUT:
- a module
"""
if 'use_cache' not in opts:
opts['use_cache'] = True
import sage.misc.cython
modname, path = sage.misc.cython.cython(filename, **opts)
import sys
try:
sys.path.insert(0,path)
module = __import__(modname)
finally:
del sys.path[0]
return module
def _import_code(self, content, **opts):
while True:
py_file_base = uuid().replace('-','_')
if not os.path.exists(py_file_base + '.py'):
break
try:
open(py_file_base+'.py', 'w').write(content)
import sys
try:
sys.path.insert(0, os.path.abspath('.'))
mod = __import__(py_file_base)
finally:
del sys.path[0]
finally:
os.unlink(py_file_base+'.py')
os.unlink(py_file_base+'.pyc')
return mod
def _sage(self, filename, **opts):
import sage.misc.preparser
content = "from sage.all import *\n" + sage.misc.preparser.preparse_file(open(filename).read())
return self._import_code(content, **opts)
def _spy(self, filename, **opts):
import sage.misc.preparser
content = "from sage.all import Integer, RealNumber, PolynomialRing\n" + sage.misc.preparser.preparse_file(open(filename).read())
return self._import_code(content, **opts)
def _py(self, filename, **opts):
return __import__(filename)
def require(self, filename, **opts):
if not os.path.exists(filename):
raise ValueError("file '%s' must exist"%filename)
base,ext = os.path.splitext(filename)
if ext == '.pyx' or ext == '.spyx':
return self._cython(filename, **opts)
if ext == ".sage":
return self._sage(filename, **opts)
if ext == ".spy":
return self._spy(filename, **opts)
if ext == ".py":
return self._py(filename, **opts)
raise NotImplementedError("require file of type %s not implemented"%ext)
def typeset_mode(self, on=True):
sage_salvus.typeset_mode(on)
def project_info(self):
"""
Return a dictionary with information about the project in which this code is running.
EXAMPLES::
sage: salvus.project_info()
{"stdout":"{u'project_id': u'...', u'location': {u'username': u'teaAuZ9M', u'path': u'.', u'host': u'localhost', u'port': 22}, u'base_url': u'/...'}\n"}
"""
return INFO
Salvus.pdf.__func__.__doc__ = sage_salvus.show_pdf.__doc__
Salvus.raw_input.__func__.__doc__ = sage_salvus.raw_input.__doc__
Salvus.clear.__func__.__doc__ = sage_salvus.clear.__doc__
Salvus.delete_last_output.__func__.__doc__ = sage_salvus.delete_last_output.__doc__
def execute(conn, id, code, data, cell_id, preparse, message_queue):
salvus = Salvus(conn=conn, id=id, data=data, message_queue=message_queue, cell_id=cell_id)
#salvus.start_executing() # with our new mainly client-side execution this isn't needed; not doing this makes evaluation roundtrip around 100ms instead of 200ms too, which is a major win.
try:
# initialize the salvus output streams
streams = (sys.stdout, sys.stderr)
sys.stdout = BufferedOutputStream(salvus.stdout)
sys.stderr = BufferedOutputStream(salvus.stderr)
try:
# initialize more salvus functionality
sage_salvus.salvus = salvus
namespace['sage_salvus'] = sage_salvus
except:
traceback.print_exc()
if salvus._prefix:
if not code.startswith("%"):
code = salvus._prefix + '\n' + code
if salvus._postfix:
code += '\n' + salvus._postfix
salvus.execute(code, namespace=namespace, preparse=preparse)
finally:
# there must be exactly one done message, unless salvus._done is False.
if sys.stderr._buf:
if sys.stdout._buf:
sys.stdout.flush()
sys.stderr.flush(done=salvus._done)
else:
sys.stdout.flush(done=salvus._done)
(sys.stdout, sys.stderr) = streams
def drop_privileges(id, home, transient, username):
gid = id
uid = id
if transient:
os.chown(home, uid, gid)
os.setgid(gid)
os.setuid(uid)
os.environ['DOT_SAGE'] = home
mpl = os.environ['MPLCONFIGDIR']
os.environ['MPLCONFIGDIR'] = home + mpl[5:]
os.environ['HOME'] = home
os.environ['IPYTHON_DIR'] = home
os.environ['USERNAME'] = username
os.environ['USER'] = username
os.chdir(home)
# Monkey patch the Sage library and anything else that does not
# deal well with changing user. This sucks, but it is work that
# simply must be done because we're not importing the library from
# scratch (which would take a long time).
import sage.misc.misc
sage.misc.misc.DOT_SAGE = home + '/.sage/'
class MessageQueue(list):
def __init__(self, conn):
self.queue = []
self.conn = conn
def __repr__(self):
return "Sage Server Message Queue"
def __getitem__(self, i):
return self.queue[i]
def __delitem__(self, i):
del self.queue[i]
def next_mesg(self):
"""
Remove oldest message from the queue and return it.
If the queue is empty, wait for a message to arrive
and return it (does not place it in the queue).
"""
if self.queue:
return self.queue.pop()
else:
return self.conn.recv()
def recv(self):
"""
Wait until one message is received and enqueue it.
Also returns the mesg.
"""
mesg = self.conn.recv()
self.queue.insert(0,mesg)
return mesg
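# Note on the queue semantics above: recv() inserts newly arrived messages at the front
# of the list and next_mesg() pops from the back, so messages are handed out oldest-first
# (FIFO).  Salvus.file() uses recv() while waiting for its 'save_blob' reply, leaving any
# unrelated messages queued for the main session loop.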
def session(conn):
"""
This is run by the child process that is forked off on each new
connection. It drops privileges, then handles the complete
compute session.
INPUT:
- ``conn`` -- the TCP connection
"""
mq = MessageQueue(conn)
pid = os.getpid()
# seed the random number generator(s)
import sage.all; sage.all.set_random_seed()
import random; random.seed(sage.all.initial_seed())
# get_memory_usage is not aware of being forked...
import sage.misc.getusage
sage.misc.getusage._proc_status = "/proc/%s/status"%os.getpid()
cnt = 0
while True:
try:
typ, mesg = mq.next_mesg()
#print 'INFO:child%s: received message "%s"'%(pid, mesg)
log("handling message ", truncate_text(unicode8(mesg), 400)[0])
event = mesg['event']
if event == 'terminate_session':
return
elif event == 'execute_code':
try:
execute(conn = conn,
id = mesg['id'],
code = mesg['code'],
data = mesg.get('data',None),
cell_id = mesg.get('cell_id',None),
preparse = mesg['preparse'],
message_queue = mq)
except Exception, err:
log("ERROR -- exception raised '%s' when executing '%s'"%(err, mesg['code']))
elif event == 'introspect':
try:
introspect(conn=conn, id=mesg['id'], line=mesg['line'], preparse=mesg['preparse'])
except:
pass
else:
raise RuntimeError("invalid message '%s'"%mesg)
except:
# When hub connection dies, loop goes crazy.
# Unfortunately, just catching SIGINT doesn't seem to
# work, and leads to random exits during a
            # session. However, when connection dies, 10000 iterations
# happen almost instantly. Ugly, but it works.
cnt += 1
if cnt > 10000:
sys.exit(0)
else:
pass
def introspect(conn, id, line, preparse):
salvus = Salvus(conn=conn, id=id) # so salvus.[tab] works -- note that Salvus(...) modifies namespace.
z = sage_parsing.introspect(line, namespace=namespace, preparse=preparse)
if z['get_completions']:
mesg = message.introspect_completions(id=id, completions=z['result'], target=z['target'])
elif z['get_help']:
mesg = message.introspect_docstring(id=id, docstring=z['result'], target=z['expr'])
elif z['get_source']:
mesg = message.introspect_source_code(id=id, source_code=z['result'], target=z['expr'])
conn.send_json(mesg)
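# SIGCHLD handler that reaps finished child processes so they do not linger as
# zombies; see the note in serve() for why it is currently not installed.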
def handle_session_term(signum, frame):
while True:
try:
pid, exit_status = os.waitpid(-1, os.WNOHANG)
except:
return
if not pid: return
secret_token = None
secret_token_path = os.path.join(os.environ['SMC'], 'secret_token')
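# Shared-secret handshake: lazily read the secret token from disk, then require
# the client to send the same token before the connection is accepted. Returns
# True for a valid token and False (after closing the connection) otherwise.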
def unlock_conn(conn):
global secret_token
if secret_token is None:
try:
secret_token = open(secret_token_path).read().strip()
except:
conn.send('n')
conn.send("Unable to accept connection, since Sage server doesn't yet know the secret token; unable to read from '%s'"%secret_token_path)
conn.close()
return False
n = len(secret_token)
token = ''
while len(token) < n:
token += conn.recv(n)
if token != secret_token[:len(token)]:
break # definitely not right -- don't try anymore
if token != secret_token:
log("token='%s'; secret_token='%s'"%(token, secret_token))
conn.send('n') # no -- invalid login
conn.send("Invalid secret token.")
conn.close()
return False
else:
conn.send('y') # yes -- valid login
return True
def serve_connection(conn):
global PID
PID = os.getpid()
# First the client *must* send the secret shared token. If they
# don't, we return (and the connection will have been destroyed by
# unlock_conn).
log("Serving a connection")
log("Waiting for client to unlock the connection...")
# TODO -- put in a timeout (?)
if not unlock_conn(conn):
log("Client failed to unlock connection. Dumping them.")
return
log("Connection unlocked.")
try:
conn = ConnectionJSON(conn)
typ, mesg = conn.recv()
log("Received message %s"%mesg)
except Exception, err:
log("Error receiving message: %s (connection terminated)"%str(err))
raise
if mesg['event'] == 'send_signal':
if mesg['pid'] == 0:
log("invalid signal mesg (pid=0)")
else:
log("Sending a signal")
os.kill(mesg['pid'], mesg['signal'])
return
if mesg['event'] != 'start_session':
log("Received an unknown message event = %s; terminating session."%mesg['event'])
return
log("Starting a session")
desc = message.session_description(os.getpid())
log("child sending session description back: %s"%desc)
conn.send_json(desc)
session(conn=conn)
def serve(port, host, extra_imports=False):
#log.info('opening connection on port %s', port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# check for children that have finished every few seconds, so
# we don't end up with zombies.
s.settimeout(5)
s.bind((host, port))
log('Sage server %s:%s'%(host, port))
# Enabling the following signal completely breaks subprocess pexpect in many cases, which is
# obviously totally unacceptable.
#signal.signal(signal.SIGCHLD, handle_session_term)
def init_library():
tm = time.time()
log("pre-importing the sage library...")
# FOR testing purposes.
##log("fake 40 second pause to slow things down for testing....")
##time.sleep(40)
##log("done with pause")
# Monkey patching interact using the new and improved Salvus
# implementation of interact.
import sagenb.notebook.interact
sagenb.notebook.interact.interact = sage_salvus.interact
# Actually import sage now. This must happen after the interact
# import because of library interacts.
log("import sage...")
import sage.all
log("imported sage.")
# Monkey patch the html command.
import sage.interacts.library
sage.all.html = sage.misc.html.html = sage.interacts.library.html = sage_salvus.html
# Set a useful figsize default; the matplotlib one is not notebook friendly.
import sage.plot.graphics
sage.plot.graphics.Graphics.SHOW_OPTIONS['figsize']=[8,4]
# Monkey patch latex.eval, so that %latex works in worksheets
sage.misc.latex.latex.eval = sage_salvus.latex0
# Pre-run plot, integrate, etc., so that worksheet startup time is minimal.
cmds = ['from sage.all import *',
'from sage.calculus.predefined import x',
'import pylab']
if extra_imports:
cmds.extend(['import scipy',
'import sympy',
"plot(sin).save('%s/a.png'%os.environ['SMC'], figsize=2)",
'integrate(sin(x**2),x)'])
tm0 = time.time()
for cmd in cmds:
log(cmd)
exec cmd in namespace
global pylab
pylab = namespace['pylab'] # used for clearing
log('imported sage library and other components in %s seconds'%(time.time() - tm))
for k,v in sage_salvus.interact_functions.iteritems():
namespace[k] = sagenb.notebook.interact.__dict__[k] = v
namespace['_salvus_parsing'] = sage_parsing
for name in ['coffeescript', 'javascript', 'time', 'timeit', 'capture', 'cython',
'script', 'python', 'python3', 'perl', 'ruby', 'sh', 'prun', 'show', 'auto',
'hide', 'hideall', 'cell', 'fork', 'exercise', 'dynamic', 'var',
'reset', 'restore', 'md', 'load', 'runfile', 'typeset_mode', 'default_mode',
'sage_chat', 'fortran', 'magics', 'go', 'julia', 'pandoc', 'wiki', 'plot3d_using_matplotlib',
'mediawiki', 'help', 'raw_input', 'clear', 'delete_last_output', 'sage_eval']:
namespace[name] = getattr(sage_salvus, name)
namespace['sage_server'] = sys.modules[__name__] # http://stackoverflow.com/questions/1676835/python-how-do-i-get-a-reference-to-a-module-inside-the-module-itself
# alias pretty_print_default to typeset_mode, since sagenb has/uses that.
namespace['pretty_print_default'] = namespace['typeset_mode']
# and monkey patch it
sage.misc.latex.pretty_print_default = namespace['pretty_print_default']
sage_salvus.default_namespace = dict(namespace)
log("setup namespace with extra functions")
# Sage's pretty_print and view are both ancient and a mess
sage.all.pretty_print = sage.misc.latex.pretty_print = namespace['pretty_print'] = namespace['view'] = namespace['show']
# this way client code can tell it is running as a Sage Worksheet.
namespace['__SAGEWS__'] = True
log("Initialize sage library.")
init_library()
t = time.time()
s.listen(128)
i = 0
children = {}
log("Starting server listening for connections")
try:
while True:
i += 1
#print i, time.time()-t, 'cps: ', int(i/(time.time()-t))
# do not use log.info(...) in the server loop; the logging threads cause race conditions that hang the server every so often.
try:
if children:
for pid in children.keys():
if os.waitpid(pid, os.WNOHANG) != (0,0):
log("subprocess %s terminated, closing connection"%pid)
conn.close()
del children[pid]
try:
conn, addr = s.accept()
log("Accepted a connection from", addr)
except:
# accept() times out periodically because of s.settimeout above, which
# gives us a regular chance to check on the children in the block above.
continue
except socket.error, msg:
continue
child_pid = os.fork()
if child_pid: # parent
log("forked off child with pid %s to handle this connection"%child_pid)
children[child_pid] = conn
else:
# child
global PID
PID = os.getpid()
log("child process, will now serve this new connection")
serve_connection(conn)
# end while
except Exception, err:
log("Error taking connection: ", err)
traceback.print_exc(file=sys.stdout)
#log.error("error: %s %s", type(err), str(err))
finally:
log("closing socket")
#s.shutdown(0)
s.close()
def run_server(port, host, pidfile, logfile=None):
global LOGFILE
if logfile:
LOGFILE = logfile
if pidfile:
open(pidfile,'w').write(str(os.getpid()))
log("run_server: port=%s, host=%s, pidfile='%s', logfile='%s'"%(port, host, pidfile, LOGFILE))
try:
serve(port, host)
finally:
if pidfile:
os.unlink(pidfile)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Run Sage server")
parser.add_argument("-p", dest="port", type=int, default=0,
help="port to listen on (default: 0); 0 = automatically allocated; saved to $SMC/data/sage_server.port")
parser.add_argument("-l", dest='log_level', type=str, default='INFO',
help="log level (default: INFO) useful options include WARNING and DEBUG")
parser.add_argument("-d", dest="daemon", default=False, action="store_const", const=True,
help="daemon mode (default: False)")
parser.add_argument("--host", dest="host", type=str, default='127.0.0.1',
help="host interface to bind to -- default is 127.0.0.1")
parser.add_argument("--pidfile", dest="pidfile", type=str, default='',
help="store pid in this file")
parser.add_argument("--logfile", dest="logfile", type=str, default='',
help="store log in this file (default: '' = don't log to a file)")
parser.add_argument("-c", dest="client", default=False, action="store_const", const=True,
help="run in test client mode number 1 (command line)")
parser.add_argument("--hostname", dest="hostname", type=str, default='',
help="hostname to connect to in client mode")
parser.add_argument("--portfile", dest="portfile", type=str, default='',
help="write port to this file")
args = parser.parse_args()
if args.daemon and not args.pidfile:
print "%s: must specify pidfile in daemon mode"%sys.argv[0]
sys.exit(1)
if args.log_level:
pass
#level = getattr(logging, args.log_level.upper())
#log.setLevel(level)
if args.client:
client1(port=args.port if args.port else int(open(args.portfile).read()), hostname=args.hostname)
sys.exit(0)
if not args.port:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM); s.bind(('',0)) # pick a free port
args.port = s.getsockname()[1]
del s
if args.portfile:
open(args.portfile,'w').write(str(args.port))
pidfile = os.path.abspath(args.pidfile) if args.pidfile else ''
logfile = os.path.abspath(args.logfile) if args.logfile else ''
if logfile:
LOGFILE = logfile
open(LOGFILE, 'w') # for now we clear it on restart...
log("setting logfile to %s"%LOGFILE)
main = lambda: run_server(port=args.port, host=args.host, pidfile=pidfile)
if args.daemon and args.pidfile:
import daemon
daemon.daemonize(args.pidfile)
main()
else:
main()
| gpl-3.0 |
michalkurka/h2o-3 | h2o-py/tests/testdir_algos/rf/pyunit_smallcatRF.py | 8 | 1954 | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.random_forest import H2ORandomForestEstimator
import numpy as np
from sklearn import ensemble
from sklearn.metrics import roc_auc_score
def smallcatRF():
# Training set has 26 categories from A to Z
# Categories A, C, E, G, ... are perfect predictors of y = 1
# Categories B, D, F, H, ... are perfect predictors of y = 0
#Log.info("Importing alphabet_cattest.csv data...\n")
alphabet = h2o.import_file(path=pyunit_utils.locate("smalldata/gbm_test/alphabet_cattest.csv"))
alphabet["y"] = alphabet["y"].asfactor()
#Log.info("Summary of alphabet_cattest.csv from H2O:\n")
#alphabet.summary()
# Prepare data for scikit use
trainData = np.loadtxt(pyunit_utils.locate("smalldata/gbm_test/alphabet_cattest.csv"), delimiter=',', skiprows=1,
converters={0:lambda s: ord(s.decode().split("\"")[1])})
trainDataResponse = trainData[:,1]
trainDataFeatures = trainData[:,0]
# Train H2O Random Forest Model:
#Log.info("H2O RF (Naive Split) with parameters:\nntrees = 1, max_depth = 1, nbins = 100\n")
rf_h2o = H2ORandomForestEstimator(ntrees=1, max_depth=1, nbins=100)
rf_h2o.train(x='X', y="y", training_frame=alphabet)
# Train scikit Random Forest Model:
# Log.info("scikit RF with same parameters:")
rf_sci = ensemble.RandomForestClassifier(n_estimators=1, criterion='entropy', max_depth=1)
rf_sci.fit(trainDataFeatures[:,np.newaxis],trainDataResponse)
# h2o
rf_perf = rf_h2o.model_performance(alphabet)
auc_h2o = rf_perf.auc()
# scikit
auc_sci = roc_auc_score(trainDataResponse, rf_sci.predict_proba(trainDataFeatures[:,np.newaxis])[:,1])
#Log.info(paste("scikit AUC:", auc_sci, "\tH2O AUC:", auc_h2o))
assert auc_h2o >= auc_sci, "h2o (auc) performance degradation, with respect to scikit"
if __name__ == "__main__":
pyunit_utils.standalone_test(smallcatRF)
else:
smallcatRF()
| apache-2.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/neural_network/tests/test_stochastic_optimizers.py | 146 | 4310 | import numpy as np
from sklearn.neural_network._stochastic_optimizers import (BaseOptimizer,
SGDOptimizer,
AdamOptimizer)
from sklearn.utils.testing import (assert_array_equal, assert_true,
assert_false, assert_equal)
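# Dummy parameter shapes (two matrices and one 3-d array) used to build the
# coefficient arrays that every optimizer test below operates on.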
shapes = [(4, 6), (6, 8), (7, 8, 9)]
def test_base_optimizer():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = BaseOptimizer(params, lr)
assert_true(optimizer.trigger_stopping('', False))
def test_sgd_optimizer_no_momentum():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = SGDOptimizer(params, lr, momentum=0, nesterov=False)
grads = [np.random.random(shape) for shape in shapes]
expected = [param - lr * grad for param, grad in zip(params, grads)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=False)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_trigger_stopping():
params = [np.zeros(shape) for shape in shapes]
lr = 2e-6
optimizer = SGDOptimizer(params, lr, lr_schedule='adaptive')
assert_false(optimizer.trigger_stopping('', False))
assert_equal(lr / 5, optimizer.learning_rate)
assert_true(optimizer.trigger_stopping('', False))
def test_sgd_optimizer_nesterovs_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=True)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
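# Expected Nesterov update, computed by hand: first the plain momentum step,
# then the look-ahead step applied on top of it, mirroring what SGDOptimizer
# does when nesterov=True.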
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
updates = [momentum * update - lr * grad
for update, grad in zip(updates, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_adam_optimizer():
params = [np.zeros(shape) for shape in shapes]
lr = 0.001
epsilon = 1e-8
for beta_1 in np.arange(0.9, 1.0, 0.05):
for beta_2 in np.arange(0.995, 1.0, 0.001):
optimizer = AdamOptimizer(params, lr, beta_1, beta_2, epsilon)
ms = [np.random.random(shape) for shape in shapes]
vs = [np.random.random(shape) for shape in shapes]
t = 10
optimizer.ms = ms
optimizer.vs = vs
optimizer.t = t - 1
grads = [np.random.random(shape) for shape in shapes]
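# Recompute the Adam first/second moment estimates and the bias-corrected step
# by hand; optimizer.update_params(grads) below is expected to apply exactly
# this update to params.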
ms = [beta_1 * m + (1 - beta_1) * grad
for m, grad in zip(ms, grads)]
vs = [beta_2 * v + (1 - beta_2) * (grad ** 2)
for v, grad in zip(vs, grads)]
learning_rate = lr * np.sqrt(1 - beta_2 ** t) / (1 - beta_1**t)
updates = [-learning_rate * m / (np.sqrt(v) + epsilon)
for m, v in zip(ms, vs)]
expected = [param + update
for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
| mit |
spatialaudio/improved_driving_functions_for_rectangular_loudspeaker_arrays | diffraction_edge.py | 1 | 1057 | """ Generates Figure 1b of the paper
Sascha Spors, Frank Schultz, and Till Rettberg. Improved Driving Functions
for Rectangular Loudspeaker Arrays Driven by Sound Field Synthesis. In
German Annual Conference on Acoustics (DAGA), March 2016.
2D scattering of a line source at a semi-infinite edge.
(c) Sascha Spors 2016, MIT Licence
"""
import numpy as np
import sfs
import matplotlib.pyplot as plt
f = 500 # frequency
omega = 2 * np.pi * f # angular frequency
alpha = 270/180*np.pi # outer angle of edge
xs = [-2, 2, 0] # position of line source
Nc = 400 # max number of circular harmonics
# compute field
grid = sfs.util.xyz_grid([-3, 5], [-5, 3], 0, spacing=0.02)
p = sfs.mono.source.line_dirichlet_edge(omega, xs, grid, Nc=Nc)
# plot field
plt.style.use(('paper.mplstyle', 'paper_box.mplstyle'))
fig = plt.figure()
ax = fig.gca()
sfs.plot.soundfield(30*p, grid, colorbar=False)
ax.plot((0, 0), (-5, 0), 'k-', lw=2)
ax.plot((0, 5), (0, 0), 'k-', lw=2)
plt.axis('off')
fig.savefig('../paper/figs/scattering_edge_500Hz.pdf')
| mit |
bjornsturmberg/NumBAT | JOSAB_tutorial/simo-josab-BSBS-acbands-450x200nmrectwg-Si.py | 1 | 3781 | """
Calculate dispersion diagram of the acoustic modes in a rectangular Si waveguide
"""
# Import the necessary packages
import time
import datetime
import numpy as np
import sys
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
sys.path.append("../backend/")
import materials
import objects
import mode_calcs
import integration
import plotting
from fortran import NumBAT
start = time.time()
# Geometric Parameters - all in nm.
wl_nm = 1550
unitcell_x = 3.01*wl_nm
unitcell_y = unitcell_x
inc_a_x = 450 # Waveguide widths.
inc_a_y = 200
inc_shape = 'rectangular'
# Choose modes to include.
num_modes_EM_pump = 20
num_modes_EM_Stokes = num_modes_EM_pump
num_modes_AC = 100
EM_ival_pump = 0
EM_ival_Stokes = EM_ival_pump
AC_ival = 'All'
# Use all specified parameters to create a waveguide object
wguide = objects.Struct(unitcell_x,inc_a_x,unitcell_y,inc_a_y,inc_shape,
material_bkg=materials.materials_dict["Vacuum"],
material_a=materials.materials_dict["Si_2021_Poulton"],
lc_bkg=0.05, # mesh coarseness in background, larger lc_bkg = coarser along horizontal outer edge
lc_refine_1=20.0, # mesh refinement factor near the interface of waveguide, larger = finer along horizontal interface
lc_refine_2=30.0, # mesh refinement factor near the origin/centre of waveguide
plt_mesh=False, # creates png file of geometry and mesh in backend/fortran/msh/
check_mesh=False) # note requires x-windows configuration to work
# Expected effective index of fundamental guided mode.
n_eff = wguide.material_a.n-0.1
# Calculate Electromagnetic modes.
sim_EM_pump = wguide.calc_EM_modes(num_modes_EM_pump, wl_nm, n_eff)
sim_EM_Stokes = mode_calcs.bkwd_Stokes_modes(sim_EM_pump)
# Print EM mode info
print('\n k_z of EM modes \n', np.round(np.real(sim_EM_pump.Eig_values),4))
n_eff_sim = np.real(sim_EM_pump.Eig_values[EM_ival_pump]*((wl_nm*1e-9)/(2.*np.pi)))
print("\n Fundamental optical mode ")
print(" n_eff = ", np.round(n_eff_sim, 4))
# k_AC of backward SBS.
k_AC = np.real(sim_EM_pump.Eig_values[EM_ival_pump] - sim_EM_Stokes.Eig_values[EM_ival_Stokes])
# Number of wavevectors steps.
nu_ks = 50
plt.clf()
plt.figure(figsize=(10,6))
ax = plt.subplot(1,1,1)
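# Sweep the acoustic wavevector from 0 to the backward-SBS value k_AC, keep
# only the propagating modes at each step, and colour-code each mode by its
# symmetry class (A, B1, B2, B3).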
for i_ac, q_ac in enumerate(np.linspace(0.0,k_AC,nu_ks)):
sim_AC = wguide.calc_AC_modes(num_modes_AC, q_ac, EM_sim=sim_EM_pump)
prop_AC_modes = np.array([np.real(x) for x in sim_AC.Eig_values if abs(np.real(x)) > abs(np.imag(x))])
sym_list = integration.symmetries(sim_AC)
for i in range(len(prop_AC_modes)):
Om = prop_AC_modes[i]*1e-9
if sym_list[i][0] == 1 and sym_list[i][1] == 1 and sym_list[i][2] == 1:
sym_A, = plt.plot(np.real(q_ac/k_AC), Om, 'or')
if sym_list[i][0] == -1 and sym_list[i][1] == 1 and sym_list[i][2] == -1:
sym_B1, = plt.plot(np.real(q_ac/k_AC), Om, 'vc')
if sym_list[i][0] == 1 and sym_list[i][1] == -1 and sym_list[i][2] == -1:
sym_B2, = plt.plot(np.real(q_ac/k_AC), Om, 'sb')
if sym_list[i][0] == -1 and sym_list[i][1] == -1 and sym_list[i][2] == 1:
sym_B3, = plt.plot(np.real(q_ac/k_AC), Om, '^g')
print("Wavevector loop", i_ac+1, "/", nu_ks)
ax.set_ylim(0,15)
ax.set_xlim(0,1)
plt.legend([sym_A, sym_B1, sym_B2, sym_B3],['A',r'B$_1$',r'B$_2$',r'B$_3$'], loc='lower right')
plt.xlabel(r'Axial wavevector (normalised)')
plt.ylabel(r'Frequency (GHz)')
plt.savefig('dispersioncurves_classified.png', bbox_inches='tight')
plt.close()
end = time.time()
print("\n Simulation time (sec.)", (end - start))
| gpl-3.0 |
theoryno3/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how the scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
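# benchmark() fits the given classifier on the sparse tf-idf features, reports
# the fraction of non-zero coefficients when available, and prints a
# classification report plus a confusion-matrix plot for the test set.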
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
sahilshekhawat/pydy | bin/benchmark_pydy_code_gen.py | 4 | 3648 | #!/usr/bin/env python
# standard library
import time
# external libraries
from numpy import hstack, ones, pi, linspace, array, zeros, zeros_like
import matplotlib.pyplot as plt
from pydy.models import n_link_pendulum_on_cart
from sympy import symbols
def run_benchmark(max_num_links, num_time_steps=1000):
"""Runs the n link pendulum derivation, code generation, and integration
for each n up to the max number provided and generates a plot of the
results."""
methods = ['lambdify', 'theano', 'cython']
link_numbers = range(1, max_num_links + 1)
derivation_times = zeros(len(link_numbers))
integration_times = zeros((max_num_links, len(methods)))
code_generation_times = zeros_like(integration_times)
for j, n in enumerate(link_numbers):
title = "Pendulum with {} links.".format(n)
print(title)
print('=' * len(title))
start = time.time()
sys = n_link_pendulum_on_cart(n, cart_force=False)
m = symbols('m:{}'.format(n + 1))
l = symbols('l:{}'.format(n))
g = symbols('g')
derivation_times[j] = time.time() - start
print('The derivation took {:1.5f} seconds.\n'.format(derivation_times[j]))
# Define the numerical values: parameters, time, and initial conditions
arm_length = 1. / n
bob_mass = 0.01 / n
parameter_vals = [9.81, 0.01 / n]
for i in range(n):
parameter_vals += [arm_length, bob_mass]
times = linspace(0, 10, num_time_steps)
sys.times = times
x0 = hstack(
(0,
pi / 2 * ones(len(sys.coordinates) - 1),
1e-3 * ones(len(sys.speeds))))
sys.initial_conditions = dict(zip(sys.states, x0))
constants = [g, m[0]]
for i in range(n):
constants += [l[i], m[i + 1]]
sys.constants = dict(zip(constants, array(parameter_vals)))
for k, method in enumerate(methods):
subtitle = "Generating with {} method.".format(method)
print(subtitle)
print('-' * len(subtitle))
start = time.time()
sys.generate_ode_function(generator=method)
code_generation_times[j, k] = time.time() - start
print('The code generation took {:1.5f} seconds.'.format(
code_generation_times[j, k]))
start = time.time()
sys.integrate()
integration_times[j, k] = time.time() - start
print('ODE integration took {:1.5f} seconds.\n'.format(
integration_times[j, k]))
del sys
# plot the results
fig, ax = plt.subplots(3, 1, sharex=True)
ax[0].plot(link_numbers, derivation_times)
ax[0].set_title('Symbolic Derivation Time')
ax[1].plot(link_numbers, code_generation_times)
ax[1].set_title('Code Generation Time')
ax[1].legend(methods, loc=2)
ax[2].plot(link_numbers, integration_times)
ax[2].set_title('Integration Time')
ax[2].legend(methods, loc=2)
for a in ax.flatten():
a.set_ylabel('Time [s]')
ax[-1].set_xlabel('Number of links')
plt.tight_layout()
fig.savefig('benchmark-results.png')
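# Command-line entry point. Illustrative invocation (both arguments are
# positional): `python benchmark_pydy_code_gen.py 4 500` benchmarks pendulums
# with 1-4 links using 500 integration time steps.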
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='Run the n link pendulum benchmark.')
parser.add_argument('max_num_links', type=int,
help="The maximum number of links to compute.")
parser.add_argument('num_time_steps', type=int,
help="The number of integration time steps.")
args = parser.parse_args()
run_benchmark(args.max_num_links, args.num_time_steps)
| bsd-3-clause |
LCAS/zoidbot | vrep_teleop/scripts/replay_trajectories.py | 1 | 4607 | #!/usr/bin/env python
# run the ReplaySavedTrajectory.ttt file on Vrep before running this
import numpy as np
import glob
import errno
import matplotlib.pyplot as plt
import rospy
from sensor_msgs.msg import JointState
from std_msgs.msg import Header
from std_msgs.msg import Float64
class Trajectory:
trajectory_number = 0
def __init__(self, filename):
self.__class__.trajectory_number = self.__class__.trajectory_number + 1
self.input_file = filename
self.read_count = 0
self.phase = None
self.joint_positions = None
self.joint_velocities = None
self.box_positions = None
self.box_orientations = None
self.context = None
self.read()
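# Parse the recorded trajectory file: every line starts with a feature name
# (e.g. simTime, currentPos, boxPosition) followed by space-separated floats,
# and repeated lines for the same feature are stacked into numpy arrays.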
def read(self):
with open(self.input_file) as f_in:
content = f_in.readlines()
new_content = [line.strip().split(' ') for line in content]
data_dict = {}
for line_pie in new_content:
feature_name = line_pie.pop(0)
this_v = [float(v) for v in line_pie]
if feature_name not in data_dict:
data_dict[feature_name] = []
data_dict[feature_name].append(this_v)
ts = np.array(data_dict['simTime'])[:, 0]
ts = (ts - ts[0])
self.read_count = len(ts)
self.phase = ts / ts[self.read_count - 1]
self.joint_positions = np.array(data_dict['currentPos'])
self.joint_velocities = np.array(data_dict['currentVel'])
self.box_positions = np.array(data_dict['boxPosition'])
self.box_orientations = np.array(data_dict['boxOrientation'])
# function to play a trajectory in vrep given the trajectory joint positions and initial box data
def baxter_play(traj, box_data):
pub1 = rospy.Publisher('/replay/joint_states', JointState, queue_size=10)
rate_value = 250
rate1 = rospy.Rate(rate_value)
pub2 = rospy.Publisher('/stopSim', Float64, queue_size=1)
rate2 = rospy.Rate(160)
prev_pos = traj[0]
for pos in traj:
replay = JointState()
replay.header = Header()
replay.header.stamp = rospy.Time.now()
replay.name = ['ljoint1', 'ljoint2', 'ljoint3', 'ljoint4', 'ljoint5', 'ljoint6', 'ljoint7', 'rjoint1',
'rjoint2', 'rjoint3', 'rjoint4', 'rjoint5', 'rjoint6', 'rjoint7']
replay.position = pos
replay.velocity = (pos - prev_pos) * rate_value
replay.effort = box_data
prev_pos = pos
pub1.publish(replay)
rate1.sleep()
# for vrep to know when a trajectory ends and to keep the joints from moving randomly
for i in range(0, 5):
replay = JointState()
replay.header = Header()
replay.header.stamp = rospy.Time.now()
replay.name = ['ljoint1', 'ljoint2', 'ljoint3', 'ljoint4', 'ljoint5', 'ljoint6', 'ljoint7', 'rjoint1',
'rjoint2', 'rjoint3', 'rjoint4', 'rjoint5', 'rjoint6', 'rjoint7']
replay.position = pos
replay.velocity = (pos - pos) # to send 0s so that the joints don't go haywire
replay.effort = []
pub1.publish(replay)
pub2.publish(data=1)
rate2.sleep()
def plot(trajectory_list, joints_list):
for j in joints_list:
plt.figure(j * 2)
plt.title('left_arm joint'+str(j+1))
for i in range(0, len(trajectory_list)):
y = trajectory_list[i].joint_positions[:, j]
x = trajectory_list[i].phase
plt.plot(x, y)
plt.figure(j * 2 + 1)
plt.title('right_arm joint' + str(j + 1))
for i in range(0, len(trajectory_list)):
y = trajectory_list[i].joint_positions[:, j + 7]
x = trajectory_list[i].phase
plt.plot(x, y)
plt.show()
if __name__ == '__main__':
rospy.init_node('trajectory_replay', anonymous=True)
input_files = glob.glob('/home/akhil/Data/box_40_15_15/turnDemoSlave_c_5_t_*_r.txt')
trajectory_list = []
for name in input_files:
try:
trajectory_list.append(Trajectory(name))
except IOError as exc: # e.g. the glob matched a directory (EISDIR); anything else is re-raised below
if exc.errno != errno.EISDIR:
raise
for i in range(0, len(trajectory_list)):
baxter_play(trajectory_list[i].joint_positions,
np.concatenate((trajectory_list[i].box_positions[0], trajectory_list[i].box_orientations[0]), axis=0))
joints_to_plot = [0, 1, 2, 3, 4, 5, 6]
plot(trajectory_list, joints_to_plot)
print('end')
| mit |
Sapphirine/Salary-Engine | SalaryEngine,Python/train.py | 1 | 1831 | import data_io
from features import FeatureMapper, SimpleTransform
import numpy as np
import pickle
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
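# Feature extraction: a bag-of-words vectorizer (top 100 terms each) over four
# text columns, combined by the FeatureMapper imported from the local features
# module.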
def feature_extractor():
features = [('FullDescription-Bag of Words', 'FullDescription', CountVectorizer(max_features=100)),
('Title-Bag of Words', 'Title', CountVectorizer(max_features=100)),
('LocationRaw-Bag of Words', 'LocationRaw', CountVectorizer(max_features=100)),
('LocationNormalized-Bag of Words', 'LocationNormalized', CountVectorizer(max_features=100))]
combined = FeatureMapper(features)
return combined
def get_pipeline():
features = feature_extractor()
steps = [("extract_features", features),
("classify", RandomForestRegressor(n_estimators=50, #The number of trees in the forest.
verbose=2,#Controls the verbosity of the tree building process.
n_jobs=1,#The number of jobs to run in parallel for both fit and predict.
min_samples_split=30, #The minimum number of samples required to split an internal node.
random_state=3465343 #seed used by the random number generator
))]
return Pipeline(steps)
def main():
print("Reading in the training data")
train = data_io.get_train_df()
print("Extracting features and training model")
classifier = get_pipeline()
classifier.fit(train, train["SalaryNormalized"])
print("Saving the classifier")
data_io.save_model(classifier)
if __name__=="__main__":
main() | apache-2.0 |
bassio/omicexperiment | omicexperiment/transforms/observation.py | 1 | 7727 | import numpy as np
from omicexperiment.transforms.transform import Transform
from omicexperiment.transforms.general import Rarefaction
from pandas import DataFrame, concat
from collections import OrderedDict
def number_unique_obs(series):
return (series > 0).sum()
class ObservationSumCounts(Transform):
def __dapply__(self, experiment):
return experiment.data_df.sum(axis=1).to_frame("sum_counts")
def __eapply__(self, experiment):
sums_df = self.__dapply__(experiment)
return experiment.with_data_df(sums_df)
class NumberUniqueObservations(Transform):
name = 'number_unique_obs'
@staticmethod
def _number_unique_obs(series):
return (series > 0).sum()
@classmethod
def __dapply__(cls, experiment):
transformed_series = (experiment.data_df > 0).sum().transpose()
transformed_series.name = cls.name
transposed_transformed_df = DataFrame(transformed_series).transpose()
return transposed_transformed_df
@classmethod
def __eapply__(cls, experiment):
transposed_transformed_df = cls.__dapply__(experiment)
return experiment.with_data_df(transposed_transformed_df)
class ClusterObservations(Transform):
def __init__(self, clusters_df, aggfunc=np.sum):
self.clusters_df = clusters_df
self.clusters_df.set_index('observation', inplace=True)
self.aggfunc = aggfunc
def clusters_df_dict(self):
return self.clusters_df.to_dict()['cluster']
def __dapply__(self, experiment):
#rename the observations according to their clusters
new_data_df = experiment.data_df.rename(index=self.clusters_df_dict())
#apply the aggregation
new_data_df.index.rename('cluster', inplace=True)
new_data_df_agg = new_data_df.groupby(level='cluster').agg(self.aggfunc)
del new_data_df
return new_data_df_agg
def __eapply__(self, experiment):
new_data_df = self.__dapply__(experiment)
return experiment.with_data_df(new_data_df)
class BinObservations(ClusterObservations):
def __init__(self, observations_to_bin, groupnames='Other', aggfunc=np.sum):
if isinstance(groupnames, list) \
and len(groupnames) != len(observations_to_bin):
raise Exception("The groupnames argument must be of same length as observations_to_bin.")
clusters_df = DataFrame({'observation': observations_to_bin, 'cluster': groupnames})
clusters_df.index.name = 'observation'
ClusterObservations.__init__(self, clusters_df, aggfunc)
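# Abundance filtering following Wang et al.: each sample's counts are
# bootstrapped by rarefaction to form 99% confidence intervals; the largest
# count among observations whose lower CI bound is <= 0 becomes that sample's
# threshold, and only observations above the threshold are kept.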
class AbundanceFilteringWangEtAl(ClusterObservations):
@staticmethod
def _bootstrap_series_concatenate(series, num_reps=1):
series_sum = series.sum()
rarefy = Rarefaction._rarefy_series
series_list = [rarefy(series, series_sum, num_reps=1)
for i in range(num_reps)]
return concat(series_list, axis=1).fillna(0)
@staticmethod
def calculate_af_threshold(counts_series, num_reps=1000):
series = counts_series.sort_values(ascending=False)
bootstrap_df = AbundanceFilteringWangEtAl._bootstrap_series_concatenate(series, num_reps)
bootstrap_df_transposed = bootstrap_df.transpose()
abund_real = series
abund_boot = bootstrap_df_transposed.mean().sort_values(ascending=False)
abund_995 = bootstrap_df_transposed.quantile(0.995)
abund_005 = bootstrap_df_transposed.quantile(0.005)
abund_adj = (2 * abund_real) - abund_boot
abund_adj = abund_adj.sort_values(ascending=False).fillna(0)
ci99_higher = abund_adj + (abund_995 - abund_boot)
ci99_higher = ci99_higher.sort_values(ascending=False).fillna(0)
ci99_lower = abund_adj - (abund_boot - abund_005)
ci99_lower = ci99_lower.sort_values(ascending=False)
unreliable = ci99_lower[ci99_lower <= 0].index
if unreliable.shape[0] > 0:
threshold = int(series[unreliable].max())
else:
threshold = 0
return threshold
@staticmethod
def abundance_filter_dataframe(counts_df, num_reps=1000):
calculate_threshold = AbundanceFilteringWangEtAl.calculate_af_threshold
new_df_dict = OrderedDict()
for col in counts_df.columns:
counts_column = counts_df[col]
threshold = calculate_threshold(counts_column, num_reps)
sequences_to_keep = counts_df[counts_column > threshold].index
new_df_dict[col] = counts_column.reindex(sequences_to_keep)
return DataFrame.from_dict(new_df_dict).fillna(0)
@classmethod
def __dapply__(cls, experiment):
counts_df = experiment.data
return AbundanceFilteringWangEtAl.abundance_filter_dataframe(counts_df)
@classmethod
def __eapply__(cls, experiment):
filtered_df = cls.__dapply__(experiment)
return experiment.with_data_df(filtered_df)
class AbundancePrevalenceStatistics(Transform):
def __init__(self, absence_presence_cutoff=1):
self.absence_presence_cutoff = absence_presence_cutoff
def __dapply__(self, experiment):
rel_abund_df = experiment.counts_df.sum(axis=1).sort_values(ascending=False).to_frame(name="mean_relative_abundance")
rel_abund_df = rel_abund_df.apply(lambda c: c / c.sum() * 100, axis=0)
absence_presence_cutoff = self.absence_presence_cutoff
prev_df = (experiment.counts_df >= absence_presence_cutoff).astype(int).apply(lambda c: c.sum() / c.count() * 100, axis=1).sort_values(ascending=False).to_frame("prevalence")
abund_prev_df = rel_abund_df.join(prev_df)
return abund_prev_df
def __eapply__(self, experiment):
new_data_df = self.__dapply__(experiment)
return experiment.with_data_df(new_data_df)
class AbundancePrevalenceRankStatistics(AbundancePrevalenceStatistics):
def __init__(self, absence_presence_cutoff=1, ascending=False):
super().__init__(absence_presence_cutoff)
self.ascending = ascending
def __dapply__(self, experiment):
abund_prev_df = super().__dapply__(experiment)
ranked_abund_df = abund_prev_df['mean_relative_abundance'].sort_values(ascending=self.ascending).rank(ascending=self.ascending).to_frame()
ranked_prev_df = abund_prev_df['prevalence'].sort_values(ascending=self.ascending).rank(ascending=self.ascending).to_frame()
joined_ranked_df = ranked_abund_df.join(ranked_prev_df)
if self.ascending:
return joined_ranked_df.iloc[::-1] #reverse the dataframe
else:
return joined_ranked_df
class TopAbundantObservations(Transform):
def __init__(self, n):
self.n = n
@staticmethod
def top_abundant_taxa(experiment, n):
abund_prev_df = experiment.apply(AbundancePrevalenceStatistics()).data_df
abund_series = abund_prev_df['mean_relative_abundance']
top_taxa = list(abund_series.head(n).index)
return top_taxa
def __dapply__(self, experiment):
top_taxa = TopAbundantObservations.top_abundant_taxa(experiment, self.n)
other_taxa = list(experiment.data_df.index.difference(top_taxa))
experiment_other = experiment.apply(BinObservations(other_taxa, groupnames='Other'))
return experiment_other.data_df
def __eapply__(self, experiment):
new_data_df = self.__dapply__(experiment)
return experiment.with_data_df(new_data_df)
| bsd-3-clause |
dfm/lfi | exoabc/data.py | 1 | 3105 | # -*- coding: utf-8 -*-
import os
import requests
import numpy as np
import pandas as pd
from io import BytesIO # Python 3 only!
__all__ = ["get_catalog", "get_sample", "get_q1_q17_dr24_injections"]
def get_q1_q17_dr24_injections(basepath=None, clobber=False):
if basepath is None:
basepath = os.environ.get("EXOABC_DATA", "data")
fn = os.path.join(basepath, "q1_q17_dr24_injection_robovetter_join.csv")
if os.path.exists(fn) and not clobber:
return pd.read_csv(fn)
if not os.path.exists(basepath):
os.makedirs(basepath)
url = ("https://zenodo.org/record/163405/files/"
"q1_q17_dr24_injection_robovetter_join.csv")
r = requests.get(url)
if r.status_code != requests.codes.ok:
r.raise_for_status()
fh = BytesIO(r.content)
df = pd.read_csv(fh)
df.to_csv(fn, index=False)
return df
def get_catalog(name, prefix="q1_q16", basepath=None, clobber=False):
"""
Download a catalog from the Exoplanet Archive by name and save it as a
Pandas HDF5 file.
:param name: the table name
:param basepath: the directory where the downloaded files should be saved
(default: ``data`` in the current working directory)
"""
if basepath is None:
basepath = os.environ.get("EXOABC_DATA", "data")
basepath = os.path.join(basepath, prefix)
fn = os.path.join(basepath, "{0}.h5".format(name))
if os.path.exists(fn) and not clobber:
return pd.read_hdf(fn, name)
if not os.path.exists(basepath):
os.makedirs(basepath)
fullname = prefix+"_"+name
print("Downloading {0}...".format(fullname))
url = ("http://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/"
"nph-nstedAPI?table={0}&select=*").format(fullname)
r = requests.get(url)
if r.status_code != requests.codes.ok:
r.raise_for_status()
fh = BytesIO(r.content)
df = pd.read_csv(fh)
df.to_hdf(fn, name, format="t")
return df
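# get_sample() filters the catalog via keyword arguments: the string "finite"
# keeps rows where the column is finite, any other string or scalar is matched
# for equality, and a 2-element sequence (lo, hi) keeps finite rows with
# lo <= column < hi, where None leaves that side unbounded.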
def get_sample(name, prefix="q1_q16", basepath=None, clobber=False, join=None,
join_on="kepid", **kwargs):
df = get_catalog(name, prefix=prefix, basepath=basepath, clobber=clobber)
if join is not None:
df = pd.merge(df, join, on=join_on, how="inner")
m = np.ones(len(df), dtype=bool)
for column, value in kwargs.items():
# Special values and strings:
if value == "finite":
m &= np.isfinite(df[column])
continue
if isinstance(value, str):
m &= df[column] == value
continue
# Single values:
try:
len(value)
except TypeError:
m &= df[column] == value
continue
# Ranges:
if len(value) != 2:
raise ValueError("unrecognized argument: {0} = {1}".format(
column, value
))
m &= np.isfinite(df[column])
if value[0] is not None:
m &= value[0] <= df[column]
if value[1] is not None:
m &= df[column] < value[1]
return pd.DataFrame(df[m])
| mit |
saketkc/statsmodels | statsmodels/tsa/statespace/tests/test_tools.py | 6 | 12534 | """
Tests for tools
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
from statsmodels.tsa.statespace import tools
from statsmodels.tsa.api import acovf
# from .results import results_sarimax
from numpy.testing import (
assert_allclose, assert_equal, assert_array_equal, assert_almost_equal,
assert_raises
)
class TestCompanionMatrix(object):
cases = [
(2, np.array([[0,1],[0,0]])),
([1,-1,-2], np.array([[1,1],
[2,0]])),
([1,-1,-2,-3], np.array([[1,1,0],
[2,0,1],
[3,0,0]])),
([1,-np.array([[1,2],[3,4]]),-np.array([[5,6],[7,8]])],
np.array([[1,2,5,6],
[3,4,7,8],
[1,0,0,0],
[0,1,0,0]]).T)
]
def test_cases(self):
for polynomial, result in self.cases:
assert_equal(tools.companion_matrix(polynomial), result)
class TestDiff(object):
x = np.arange(10)
cases = [
# diff = 1
([1,2,3], 1, None, 1, [1, 1]),
# diff = 2
(x, 2, None, 1, [0]*8),
# diff = 1, seasonal_diff=1, k_seasons=4
(x, 1, 1, 4, [0]*5),
(x**2, 1, 1, 4, [8]*5),
(x**3, 1, 1, 4, [60, 84, 108, 132, 156]),
# diff = 1, seasonal_diff=2, k_seasons=2
(x, 1, 2, 2, [0]*5),
(x**2, 1, 2, 2, [0]*5),
(x**3, 1, 2, 2, [24]*5),
(x**4, 1, 2, 2, [240, 336, 432, 528, 624]),
]
def test_cases(self):
# Basic cases
for series, diff, seasonal_diff, k_seasons, result in self.cases:
# Test numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Series
series = pd.Series(series)
# Rewrite to test as n-dimensional array
series = np.c_[series, series]
result = np.c_[result, result]
# Test Numpy array
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
# Test as Pandas Dataframe
series = pd.DataFrame(series)
x = tools.diff(series, diff, seasonal_diff, k_seasons)
assert_almost_equal(x, result)
class TestIsInvertible(object):
cases = [
([1, -0.5], True),
([1, 1-1e-9], True),
([1, 1], False),
([1, 0.9,0.1], True),
(np.array([1,0.9,0.1]), True),
(pd.Series([1,0.9,0.1]), True)
]
def test_cases(self):
for polynomial, invertible in self.cases:
assert_equal(tools.is_invertible(polynomial), invertible)
class TestConstrainStationaryUnivariate(object):
cases = [
(np.array([2.]), -2./((1+2.**2)**0.5))
]
def test_cases(self):
for unconstrained, constrained in self.cases:
result = tools.constrain_stationary_univariate(unconstrained)
assert_equal(result, constrained)
class TestUnconstrainStationaryUnivariate(object):
cases = [
(np.array([-2./((1+2.**2)**0.5)]), np.array([2.]))
]
def test_cases(self):
for constrained, unconstrained in self.cases:
result = tools.unconstrain_stationary_univariate(constrained)
assert_allclose(result, unconstrained)
class TestStationaryUnivariate(object):
# Test that the constraint and unconstraint functions are inverses
constrained_cases = [
np.array([0]), np.array([0.1]), np.array([-0.5]), np.array([0.999])]
unconstrained_cases = [
np.array([10.]), np.array([-40.42]), np.array([0.123])]
def test_cases(self):
for constrained in self.constrained_cases:
unconstrained = tools.unconstrain_stationary_univariate(constrained)
reconstrained = tools.constrain_stationary_univariate(unconstrained)
assert_allclose(reconstrained, constrained)
for unconstrained in self.unconstrained_cases:
constrained = tools.constrain_stationary_univariate(unconstrained)
reunconstrained = tools.unconstrain_stationary_univariate(constrained)
assert_allclose(reunconstrained, unconstrained)
class TestValidateMatrixShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,2), 5, 2, None),
('TEST', (5,2), 5, 2, 10),
('TEST', (5,2,10), 5, 2, 10),
]
invalid = [
('TEST', (5,), 5, None, None),
('TEST', (5,1,1,1), 5, 1, None),
('TEST', (5,2), 10, 2, None),
('TEST', (5,2), 5, 1, None),
('TEST', (5,2,10), 5, 2, None),
('TEST', (5,2,10), 5, 2, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_matrix_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises(
ValueError, tools.validate_matrix_shape, *args
)
class TestValidateVectorShape(object):
# name, shape, nrows, ncols, nobs
valid = [
('TEST', (5,), 5, None),
('TEST', (5,), 5, 10),
('TEST', (5,10), 5, 10),
]
invalid = [
('TEST', (5,2,10), 5, 10),
('TEST', (5,), 10, None),
('TEST', (5,10), 5, None),
('TEST', (5,10), 5, 5),
]
def test_valid_cases(self):
for args in self.valid:
# Just testing that no exception is raised
tools.validate_vector_shape(*args)
def test_invalid_cases(self):
for args in self.invalid:
assert_raises(
ValueError, tools.validate_vector_shape, *args
)
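# The autocovariance checks below reproduce the analytic VAR(1) and VAR(2)
# examples from Lutkepohl (2007); the inline comments point to the relevant
# equations.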
def test_multivariate_acovf():
_acovf = tools._compute_multivariate_acovf_from_coefficients
# Test for a VAR(1) process. From Lutkepohl (2007), pages 27-28.
# See (2.1.14) for Phi_1, (2.1.33) for Sigma_u, and (2.1.34) for Gamma_0
Sigma_u = np.array([[2.25, 0, 0],
[0, 1.0, 0.5],
[0, 0.5, 0.74]])
Phi_1 = np.array([[0.5, 0, 0],
[0.1, 0.1, 0.3],
[0, 0.2, 0.3]])
Gamma_0 = np.array([[3.0, 0.161, 0.019],
[0.161, 1.172, 0.674],
[0.019, 0.674, 0.954]])
assert_allclose(_acovf([Phi_1], Sigma_u)[0], Gamma_0, atol=1e-3)
# Test for a VAR(2) process. From Lutkepohl (2007), pages 28-29
# See (2.1.40) for Phi_1, Phi_2, (2.1.14) for Sigma_u, and (2.1.42) for
# Gamma_0, Gamma_1
Sigma_u = np.diag([0.09, 0.04])
Phi_1 = np.array([[0.5, 0.1],
[0.4, 0.5]])
Phi_2 = np.array([[0, 0],
[0.25, 0]])
Gamma_0 = np.array([[0.131, 0.066],
[0.066, 0.181]])
Gamma_1 = np.array([[0.072, 0.051],
[0.104, 0.143]])
Gamma_2 = np.array([[0.046, 0.040],
[0.113, 0.108]])
Gamma_3 = np.array([[0.035, 0.031],
[0.093, 0.083]])
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u, maxlag=0),
[Gamma_0], atol=1e-3)
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u, maxlag=1),
[Gamma_0, Gamma_1], atol=1e-3)
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u),
[Gamma_0, Gamma_1], atol=1e-3)
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u, maxlag=2),
[Gamma_0, Gamma_1, Gamma_2], atol=1e-3)
assert_allclose(
_acovf([Phi_1, Phi_2], Sigma_u, maxlag=3),
[Gamma_0, Gamma_1, Gamma_2, Gamma_3], atol=1e-3)
# Test sample acovf in the univariate case against sm.tsa.acovf
x = np.arange(20)*1.0
assert_allclose(
np.squeeze(tools._compute_multivariate_sample_acovf(x, maxlag=4)),
acovf(x)[:5])
def test_multivariate_pacf():
# Test the multivariate sample pacf: a linear trend plus independent noise
# should give a lag-1 pacf matrix close to diag(1, 0).
np.random.seed(1234)
x = np.arange(10000)
y = np.random.normal(size=10000)
# Note: could make this test more precise with higher nobs, but no need to
assert_allclose(
tools._compute_multivariate_sample_pacf(np.c_[x, y], maxlag=1)[0],
np.diag([1, 0]), atol=1e-2)
class TestConstrainStationaryMultivariate(object):
cases = [
# This is the same test as the univariate case above, except notice
# the sign difference; this is an array input / output
(np.array([[2.]]), np.eye(1), np.array([[2./((1+2.**2)**0.5)]])),
# Same as above, but now a list input / output
([np.array([[2.]])], np.eye(1), [np.array([[2./((1+2.**2)**0.5)]])])
]
eigval_cases = [
[np.array([[0]])],
[np.array([[100]]), np.array([[50]])],
[np.array([[30, 1], [-23, 15]]), np.array([[10, .3], [.5, -30]])],
]
def test_cases(self):
# Test against known results
for unconstrained, error_variance, constrained in self.cases:
result = tools.constrain_stationary_multivariate(
unconstrained, error_variance)
assert_allclose(result[0], constrained)
# Test that the constrained results correspond to companion matrices
# with eigenvalues less than 1 in modulus
for unconstrained in self.eigval_cases:
if type(unconstrained) == list:
cov = np.eye(unconstrained[0].shape[0])
else:
cov = np.eye(unconstrained.shape[0])
constrained, _ = tools.constrain_stationary_multivariate(unconstrained, cov)
companion = tools.companion_matrix(
[1] + [-constrained[i] for i in range(len(constrained))]
).T
assert_equal(np.max(np.abs(np.linalg.eigvals(companion))) < 1, True)
class TestUnconstrainStationaryMultivariate(object):
cases = [
# This is the same test as the univariate case above, except notice
# the sign difference; this is an array input / output
(np.array([[2./((1+2.**2)**0.5)]]), np.eye(1), np.array([[2.]])),
# Same as above, but now a list input / output
([np.array([[2./((1+2.**2)**0.5)]])], np.eye(1), [np.array([[2.]])])
]
def test_cases(self):
for constrained, error_variance, unconstrained in self.cases:
result = tools.unconstrain_stationary_multivariate(
constrained, error_variance)
assert_allclose(result[0], unconstrained)
class TestStationaryMultivariate(object):
# Test that the constraint and unconstraint functions are inverses
constrained_cases = [
np.array([[0]]), np.array([[0.1]]), np.array([[-0.5]]), np.array([[0.999]]),
[np.array([[0]])],
np.array([[0.8, -0.2]]),
[np.array([[0.8]]), np.array([[-0.2]])],
[np.array([[0.3, 0.01], [-0.23, 0.15]]), np.array([[0.1, 0.03], [0.05, -0.3]])],
np.array([[0.3, 0.01, 0.1, 0.03], [-0.23, 0.15, 0.05, -0.3]])
]
unconstrained_cases = [
np.array([[0]]), np.array([[-40.42]]), np.array([[0.123]]),
[np.array([[0]])],
np.array([[100, 50]]),
[np.array([[100]]), np.array([[50]])],
[np.array([[30, 1], [-23, 15]]), np.array([[10, .3], [.5, -30]])],
np.array([[30, 1, 10, .3], [-23, 15, .5, -30]])
]
def test_cases(self):
for constrained in self.constrained_cases:
if type(constrained) == list:
cov = np.eye(constrained[0].shape[0])
else:
cov = np.eye(constrained.shape[0])
unconstrained, _ = tools.unconstrain_stationary_multivariate(constrained, cov)
reconstrained, _ = tools.constrain_stationary_multivariate(unconstrained, cov)
assert_allclose(reconstrained, constrained)
for unconstrained in self.unconstrained_cases:
if type(unconstrained) == list:
cov = np.eye(unconstrained[0].shape[0])
else:
cov = np.eye(unconstrained.shape[0])
constrained, _ = tools.constrain_stationary_multivariate(unconstrained, cov)
reunconstrained, _ = tools.unconstrain_stationary_multivariate(constrained, cov)
# Note: low tolerance comes from last example in unconstrained_cases,
# but is not a real problem
assert_allclose(reunconstrained, unconstrained, atol=1e-4)
| bsd-3-clause |
thientu/scikit-learn | sklearn/utils/extmath.py | 70 | 21951 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
An (A.shape[0] x size) matrix with orthonormal columns, the range of
which approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sample the range of A by linear projection of R
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tends to be a little faster in that
case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to the
signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
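# Illustrative usage sketch (not part of scikit-learn): on an exactly
# low-rank matrix the randomized SVD recovers the leading singular values of
# the exact SVD and reconstructs the matrix closely. Only NumPy and the
# definitions above are assumed; tolerances are illustrative.
def _demo_randomized_svd():
    import numpy as np
    rng = np.random.RandomState(0)
    M = np.dot(rng.rand(60, 5), rng.rand(5, 40))  # exactly rank 5
    U, s, V = randomized_svd(M, n_components=5, n_iter=4, random_state=0)
    s_exact = np.linalg.svd(M, compute_uv=False)[:5]
    assert np.allclose(s, s_exact, rtol=1e-3)
    assert np.allclose(np.dot(U * s, V), M, atol=1e-6)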
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the least error
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2, and the sum of these weights is 3.5.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Eigenvalues smaller than cond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
The pseudo-inverse of the matrix `a`.
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
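# Illustrative usage sketch (not part of scikit-learn): svd_flip changes only
# the signs of paired singular vectors, so the reconstruction is unchanged
# while the output becomes deterministic. Only NumPy and svd_flip above are
# assumed.
def _demo_svd_flip():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(6, 4)
    U, s, V = np.linalg.svd(X, full_matrices=False)
    U2, V2 = svd_flip(U.copy(), V.copy())
    assert np.allclose(np.dot(U2 * s, V2), X)
    # the largest-magnitude loading in each column of U2 is now positive
    cols = np.arange(U2.shape[1])
    assert np.all(U2[np.abs(U2).argmax(axis=0), cols] >= 0)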
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
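# Illustrative usage sketch (not part of scikit-learn): log_logistic agrees
# with the naive -log(1 + exp(-x)) on moderate inputs and stays finite where
# the naive formula overflows. Assumes NumPy plus the definitions above,
# including the compiled _log_logistic_sigmoid helper used internally.
def _demo_log_logistic():
    import numpy as np
    x = np.array([-3.0, 0.0, 3.0])
    naive = -np.log(1.0 + np.exp(-x))
    assert np.allclose(log_logistic(x), naive)
    big_negative = np.array([-1000.0, -500.0])  # naive formula gives -inf here
    assert np.all(np.isfinite(log_logistic(big_negative)))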
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
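# Illustrative usage sketch (not part of scikit-learn): each row of the
# softmax output sums to one, and the row-max subtraction keeps huge inputs
# from overflowing. Only NumPy and the softmax definition above are assumed.
def _demo_softmax():
    import numpy as np
    X = np.array([[1.0, 2.0, 3.0],
                  [1000.0, 1000.0, 1000.0]])  # naive exp() would overflow
    P = softmax(X)
    assert np.allclose(P.sum(axis=1), 1.0)
    assert np.allclose(P[1], 1.0 / 3)
    assert np.all(np.isfinite(P))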
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
analysis and recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
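# Illustrative usage sketch (not part of scikit-learn): feeding a second
# batch into _batch_mean_variance_update reproduces the mean and population
# variance of the concatenated data. The count is passed as a float so the
# update stays exact under Python 2 integer division as well.
def _demo_batch_mean_variance_update():
    import numpy as np
    rng = np.random.RandomState(0)
    X1, X2 = rng.rand(30, 3), rng.rand(20, 3)
    mean, var, count = _batch_mean_variance_update(
        X2, X1.mean(axis=0), X1.var(axis=0), float(X1.shape[0]))
    X = np.vstack((X1, X2))
    assert np.allclose(mean, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))
    assert count == X.shape[0]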
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
Upward-Spiral-Science/team1 | other/scatterplot_synapses.py | 1 | 2150 | """Make a 3D scatterplot of the synapses.
Currently the program only looks at data points with synapse count greater
than the mean, and then randomly samples from that subset, since the 3D
grapher runs very slowly when there are too many data points.
-Jay Miller """
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
filter_less_than_avg = True # filter out data points where
# number of synapses is less than avg
samples = 1000 # how many random samples?
def check_condition(row):
if row[-1] == 0:
return False
return True
def synapse_filt(row, avg):
if row[-1] > avg:
return True
return False
csv = np.genfromtxt('output.csv', delimiter=",")[1:]
a = np.apply_along_axis(check_condition, 1, csv)
a = np.where(a == True)[0]
nonzero_rows = csv[a, :]
avg_synapse = np.mean(nonzero_rows[:, -1])
xyz_only = nonzero_rows[:, [0, 1, 2]]
print xyz_only.shape
if filter_less_than_avg:
filter_avg_synapse = np.apply_along_axis(synapse_filt, 1,
nonzero_rows, avg_synapse)
a = np.where(filter_avg_synapse == True)[0]
nonzero_filtered = nonzero_rows[a, :]
xyz_only = nonzero_filtered[:, [0, 1, 2]]
print xyz_only.shape
# randomly sample
perm = np.random.permutation(xrange(1, len(xyz_only[:])))
xyz_only = xyz_only[perm[:samples]]
# get range for graphing
x_min = np.amin(xyz_only[:, 0])
x_max = np.amax(xyz_only[:, 0])
y_max = np.amax(xyz_only[:, 1])
y_min = np.amin(xyz_only[:, 1])
z_min = np.amin(xyz_only[:, 2])
z_max = np.amax(xyz_only[:, 2])
# following code adopted from
# https://www.getdatajoy.com/examples/python-plots/3d-scatter-plot
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_title('3D Scatter Plot')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_zlim(z_min, z_max)
ax.view_init()
ax.dist = 10 # distance
ax.scatter(
xyz_only[:, 0], xyz_only[:, 1], xyz_only[:, 2], # data
c='blue', # marker colour
marker='o', # marker shape
s=30 # marker size
)
plt.show() # render the plot
| apache-2.0 |
natasasdj/OpenWPM | analysis_parallel/00_statistics.py | 1 | 3731 | #TODO: number of successfully loaded sites
#TODO: number of successfully loaded links
import pandas as pd
import sqlite3
import os
import sys
from statsmodels.distributions.empirical_distribution import ECDF
import matplotlib.pyplot as plt
res_dir = '/home/nsarafij/project/OpenWPM/analysis_parallel/results'
db = os.path.join(res_dir,'site_visits.sqlite')
conn = sqlite3.connect(db)
query = 'SELECT * FROM site_visits'
sv = pd.read_sql_query(query,conn)
# total number of pages visited
sv.shape[0] #1000619 #4351318
# number of home sites visited
sv0 = sv.query('link_id==0')
sv0.shape[0] #10000 #37999
for i in range(1,38001):
if i in sv0['site_id'].values: continue
print "missing site_id:", i
# missing site_id: 6800
# number of successfully loaded home sites
sv0['success'].sum() # 34716
sv0['success'].sum() / float(sv0.shape[0]) #0.9136
# number of first links visited
sv1 = sv.query('link_id<>0')
sv1.shape[0] #990619 #4313319
# number of successfully loaded first links
sv1['success'].sum() #964315 #4313319
sv1['success'].sum() / float(sv1.shape[0]) # 1
# number of successfully loaded pages
sv['success'].sum() #4347837
sv['success'].sum() / float(sv.shape[0]) #0.9992
# average number of first links per successfully loaded site
float(sv1.shape[0])/sv0.shape[0]
#113.51138187847049
# distribution of the number of links
# distribution of the response time for the home sites
# distribution of the response time for the links
def ecdf_for_plot(sample):
#x = np.linspace(min(sample), max(sample))
print "sample: ",type(sample)
x = sample.sort_values(ascending = False)
ecdf = ECDF(x)
# print ecdf
print "ecdf: ",type(ecdf)
y = ecdf(x)
#print y
print "y: ", type(y)
return (x,y)
resp_time_sites = df0['resp_time_3'].fillna(91)
resp_time_links = df1['resp_time_2'].fillna(61)
(x1,y1) = ecdf_for_plot(resp_time_sites)
(x2,y2) = ecdf_for_plot(resp_time_links)
no_links = df0['no_links'][df0['resp_time_3'].notnull()]
(x3,y3) = ecdf_for_plot(no_links)
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k'
plt.figure()
plt.step(x1,y1,label='sites')
plt.step(x2,y2,label='first links')
plt.title('CDF of the response time')
plt.xlabel('time [s]')
plt.legend(loc='lower right',shadow=True)
plt.grid(True)
plt.savefig(os.path.join(fig_dir,'resp_time_cdf.png'))
plt.show()
plt.figure()
plt.step(x3,y3)
plt.title('CDF of the number of links for the home sites')
plt.xlabel('number of links')
plt.grid(True)
plt.savefig(os.path.join(fig_dir,'no_links_cdf.png'))
no_links_counts = no_links.value_counts(sort = False).sort_index()
no_links_cumSum = (no_links_counts * no_links_counts.index).cumsum()
no_links_cumPerc = no_links_cumSum/no_links_cumSum.iloc[-1]
from matplotlib.ticker import FuncFormatter
# function for formating numbers in images
def thousands(x, pos):
if x>=1e9:
return '%.1fB' % (x*1e-9)
elif x>=1e6:
return '%.1fM' % (x*1e-6)
elif x>=1e3:
return '%.1fK' % (x*1e-3)
else:
return x
formatter = FuncFormatter(thousands)
fig, ax = plt.subplots()
plt.plot(no_links_cumSum)
plt.title('Cumulative sum of the number of links for the home sites')
plt.ylabel('cumulative sum')
plt.xlabel('number of links')
plt.grid(True)
ax.yaxis.set_major_formatter(formatter)
plt.tight_layout()
plt.savefig(os.path.join(fig_dir,'no_links_cumSum.png'))
plt.figure()
plt.plot(no_links_cumPerc)
plt.title('Cumulative percentage of the number of links for the home sites')
plt.xlabel('number of links')
plt.ylabel('cumulative percentage of the total number of links')
plt.grid(True)
plt.tight_layout()
plt.savefig(os.path.join(fig_dir,'no_links_cumPerc.png'))
plt.show()
| gpl-3.0 |
jslhs/sunpy | sunpy/util/util.py | 1 | 4272 | """Provides utility programs.
Notes:
The astronomy-type utilities should probably be separated out into
another file.
--schriste
"""
from __future__ import absolute_import
from scipy.constants import constants as con
__all__ = ["toggle_pylab", "degrees_to_hours", "degrees_to_arc",
"kelvin_to_keV", "keV_to_kelvin", "unique", "print_table",
"to_angstrom"]
from matplotlib import pyplot
import numpy as np
from itertools import izip, imap
def to_signed(dtype):
""" Return dtype that can hold data of passed dtype but is signed.
Raise ValueError if no such dtype exists.
Parameters
----------
dtype : np.dtype
dtype whose values the new dtype needs to be able to represent.
"""
if dtype.kind == "u":
if dtype.itemsize == 8:
raise ValueError("Cannot losslessy convert uint64 to int.")
dtype = "int%d" % (min(dtype.itemsize * 2 * 8, 64))
return np.dtype(dtype)
def toggle_pylab(fn):
""" A decorator to prevent functions from opening matplotlib windows
unexpectedly when sunpy is run in interactive shells like ipython
--pylab.
Toggles the value of matplotlib.pyplot.isinteractive() to preserve the
users' expections of pylab's behaviour in general. """
if pyplot.isinteractive():
def fn_itoggle(*args, **kwargs):
pyplot.ioff()
ret = fn(*args, **kwargs)
pyplot.ion()
return ret
return fn_itoggle
else:
return fn
def degrees_to_hours(angle):
"""Converts an angle from the degree notation to the hour, arcmin, arcsec
notation (returned as a list)."""
hour = int(np.floor(angle / 15))
remainder = angle / 15.0 - hour
arcminute = int(np.floor(remainder * 60))
remainder = remainder * 60 - arcminute
arcsecond = remainder * 60.0
return [hour, arcminute, arcsecond]
def degrees_to_arc(angle):
"""Converts decimal degrees to degree, arcminute,
arcsecond (returned as a list)."""
degree = int(np.floor(angle))
remainder = angle - degree
arcminute = int(np.floor(remainder * 60))
remainder = remainder * 60 - arcminute
arcsecond = remainder * 60.0
return [degree, arcminute, arcsecond]
wavelength = [
('Angstrom', 1e-10),
('nm', 1e-9),
('micron', 1e-6),
('micrometer', 1e-6),
('mm', 1e-3),
('cm', 1e-2),
('m', 1),
]
energy = [
('eV', 1),
('keV', 1e3),
('MeV', 1e6),
]
frequency = [
('Hz', 1),
('kHz', 1e3),
('MHz', 1e6),
('GHz', 1e9),
]
units = {}
for k, v in wavelength:
units[k] = ('wavelength', v)
for k, v in energy:
units[k] = ('energy', v)
for k, v in frequency:
units[k] = ('frequency', v)
def to_angstrom(value, unit):
C = 299792458.
ANGSTROM = units['Angstrom'][1]
try:
type_, n = units[unit]
except KeyError:
raise ValueError('Cannot convert %s to Angstrom' % unit)
if type_ == 'wavelength':
x = n / ANGSTROM
return value * x
elif type_ == 'frequency':
x = 1 / ANGSTROM / n
return x * (C / value)
elif type_ == 'energy':
x = 1 / (ANGSTROM / 1e-2) / n
return x * (1 / (8065.53 * value))
else:
raise ValueError('Unable to convert %s to Angstrom' % type_)
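# Illustrative usage sketch (not part of sunpy): a few spot checks of
# to_angstrom using the identity, energy and frequency branches. The
# expected values are standard conversions, quoted here to loose tolerances.
def _demo_to_angstrom():
    # Angstrom input is returned unchanged
    assert to_angstrom(5000, 'Angstrom') == 5000
    # a 1 keV photon corresponds to roughly 12.4 Angstrom
    assert abs(to_angstrom(1, 'keV') - 12.398) < 0.01
    # 600,000 GHz (6e14 Hz) corresponds to roughly 4996.5 Angstrom
    assert abs(to_angstrom(600000, 'GHz') - 4996.5) < 1.0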
def kelvin_to_keV(temperature):
"""Convert from temperature expressed in Kelvin to a
temperature expressed in keV"""
return temperature / (con.e / con.k * 1000.0)
def keV_to_kelvin(temperature):
"""Convert from temperature expressed in keV to a temperature
expressed in Kelvin"""
return temperature * (con.e / con.k * 1000.0)
def unique(itr, key=None):
items = set()
if key is None:
for elem in itr:
if elem not in items:
yield elem
items.add(elem)
else:
for elem in itr:
x = key(elem)
if x not in items:
yield elem
items.add(x)
def print_table(lst, colsep=' ', linesep='\n'):
width = [max(imap(len, col)) for col in izip(*lst)]
return linesep.join(
colsep.join(
col.ljust(n) for n, col in izip(width, row)
) for row in lst
)
| bsd-2-clause |
ftan84/twitter-influence-analytics | twitter-influence-analytics.py | 1 | 4876 | import twitter
import pandas
import time
import json
import os.path
import sys
import itertools
import csv
def jsonUnicodeConvert(input):
'''Converts unicode JSON to str.'''
if isinstance(input, dict):
return {jsonUnicodeConvert(key): jsonUnicodeConvert(value) for key, value in input.iteritems()}
elif isinstance(input, list):
return [jsonUnicodeConvert(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
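# Illustrative usage sketch (not part of this script, Python 2 only):
# converting a nested structure of unicode keys and values into plain str.
def _demo_jsonUnicodeConvert():
    data = {u'name': u'analytics', u'tags': [u'twitter', u'influence']}
    converted = jsonUnicodeConvert(data)
    assert all(isinstance(k, str) for k in converted)
    assert all(isinstance(v, str) for v in converted['tags'])
    assert converted['name'] == 'analytics'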
spinner = itertools.cycle(['|', '/', '-', '\\'])
def spin():
sys.stdout.write('\b{}'.format(spinner.next()))
sys.stdout.flush()
# Load the config.json file
with open('config.json', 'r') as f:
jsonData = f.read()
config = json.loads(jsonData)
# Create the twitter object
tw = twitter.Twitter(auth=twitter.OAuth(config['access_token'], config['access_token_secret'], config['consumer_key'], config['consumer_secret']))
# Prompt the user
userinput = raw_input('Enter a comma-separated list of Twitter screen names to analyze: ')
userinput = userinput.replace(' ', '').split(',')
query = raw_input('Enter your query: ')
# The first query
print 'Running query...'
results = tw.search.tweets(q=query,
count=100,
until=time.strftime('%Y-%m-%d'))
results = jsonUnicodeConvert(results)
df = pandas.DataFrame(results['statuses'])
df['user_id'] = [r['user']['id_str'] for r in results['statuses']]
with open('data.csv', mode='w') as f:
df.to_csv(f, encoding='UTF-8')
counter = 100
# Loop the query
while not df.empty and counter > 0:
spin()
counter -= 1
minId = min(df.id) - 1
results = tw.search.tweets(q=query,
count=100,
max_id=str(minId))
df = pandas.DataFrame(results['statuses'])
df['user_id'] = [r['user']['id_str'] for r in results['statuses']]
with open('data.csv', mode='a') as f:
df.to_csv(f, encoding='UTF-8', header=False)
print '\nCompleted query'
# twitter_accounts = ['catiewayne', 'Animalists']
print 'Determining influence...'
twitter_accounts = userinput
followers = []
for sn in twitter_accounts:
spin()
if os.path.isfile('save.dat'):
with open('save.dat', 'r') as f:
next_cursor = json.loads(f.read())
else:
next_cursor = -1
max_list = 100000 / 5000 # Put a limit to how many followers we want to get
while next_cursor and max_list >= 0:
max_list -= 1
x = tw.application.rate_limit_status(resources='followers')
# print(x['resources']['followers']['/followers/ids'])
# Wait 10 minutes for the rate limit to reset
if x['resources']['followers']['/followers/ids']['remaining'] <= 5:
time.sleep(60 * 10)
results = tw.followers.ids(screen_name=sn, cursor=next_cursor, stringify_ids=True)
next_cursor = results['next_cursor']
followers += results['ids']
with open('followers.csv', 'a') as f:
writer = csv.writer(f)
for id in results['ids']:
writer.writerow([id.encode('utf8')])
with open('save.dat', 'w') as f:
f.write('{"followers_next_cursor":"%s"}' % next_cursor)
tweetData = pandas.DataFrame.from_csv('data.csv')
counter = 0
influencers = []
for user in followers:
spin()
if not tweetData[tweetData.user_id == int(user)].empty:
counter += 1
influencers.append(user)
# print(counter)
# print(len(followers))
impressions = 0
for user in influencers:
spin()
impressions += tw.users.lookup(user_id=user)[0]['followers_count']
print '\nComplete!'
# userPrettyPrint = ''
# if len(twitter_accounts) == 1:
# userPrettyPrint = twitter_accounts[0]
# elif len(twitter_accounts) == 2:
# userPrettyPrint = twitter_accounts[0] + ' and ' + twitter_accounts[1]
# elif len(twitter_accounts) > 2:
# for i in range(len(twitter_accounts) - 1):
# userPrettyPrint += twitter_accounts[i] + ','
userPrettyPrint = ''
if len(twitter_accounts) == 1:
userPrettyPrint = '@' + twitter_accounts[0]
elif len(twitter_accounts) == 2:
userPrettyPrint = '@' + twitter_accounts[0] + ' and @' + twitter_accounts[1]
elif len(twitter_accounts) > 2:
for i in range(len(twitter_accounts) - 1):
userPrettyPrint += '@' + twitter_accounts[i] + ', '
userPrettyPrint = userPrettyPrint[:-2] + ' and @' + twitter_accounts[-1]
print 'Twitter users {} have a total of {} followers.'.format(userPrettyPrint, len(followers))
print 'Of those {} followers, {} were engaged in the topic of {} in the past seven days.'.format(len(followers), len(influencers), query)
print 'Those {} have {} followers of their own.'.format(len(influencers), impressions)
print 'Your impressions have gone from {} to {}!'.format(len(followers), len(followers) + impressions)
# print impressions
# print len(influencers)
# print len(followers)
#
| mit |
rexshihaoren/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 292 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how the scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/sklearn/utils/setup.py | 296 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/artist.py | 2 | 45464 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from collections import OrderedDict, namedtuple
from functools import wraps
import inspect
import re
import warnings
import numpy as np
import matplotlib
from . import cbook, docstring, rcParams
from .path import Path
from .transforms import (Bbox, IdentityTransform, Transform, TransformedBbox,
TransformedPatchPath, TransformedPath)
# Note, matplotlib artists use the doc strings for set and get
# methods to enable the introspection methods of setp and getp. Every
# set_* method should have a docstring containing the line
#
# ACCEPTS: [ legal | values ]
#
# and aliases for setters and getters should have a docstring that
# starts with 'alias for ', as in 'alias for set_somemethod'
#
# You may wonder why we use so much boiler-plate manually defining the
# set_alias and get_alias functions, rather than using some clever
# python trick. The answer is that I need to be able to manipulate
# the docstring, and there is no clever way to do that in python 2.2,
# as far as I can see - see
#
# https://mail.python.org/pipermail/python-list/2004-October/242925.html
def allow_rasterization(draw):
"""
Decorator for Artist.draw method. Provides routines
that run before and after the draw call. The before and after functions
are useful for changing artist-dependent renderer attributes or making
other setup function calls, such as starting and flushing a mixed-mode
renderer.
"""
# the axes class has a second argument inframe for its draw method.
@wraps(draw)
def draw_wrapper(artist, renderer, *args, **kwargs):
try:
if artist.get_rasterized():
renderer.start_rasterizing()
if artist.get_agg_filter() is not None:
renderer.start_filter()
return draw(artist, renderer, *args, **kwargs)
finally:
if artist.get_agg_filter() is not None:
renderer.stop_filter(artist.get_agg_filter())
if artist.get_rasterized():
renderer.stop_rasterizing()
draw_wrapper._supports_rasterization = True
return draw_wrapper
def _stale_axes_callback(self, val):
if self.axes:
self.axes.stale = val
_XYPair = namedtuple("_XYPair", "x y")
class Artist(object):
"""
Abstract base class for someone who renders into a
:class:`FigureCanvas`.
"""
aname = 'Artist'
zorder = 0
# order of precedence when bulk setting/updating properties
# via update. The keys should be property names and the values
# integers
_prop_order = dict(color=-1)
def __init__(self):
self._stale = True
self.stale_callback = None
self._axes = None
self.figure = None
self._transform = None
self._transformSet = False
self._visible = True
self._animated = False
self._alpha = None
self.clipbox = None
self._clippath = None
self._clipon = True
self._label = ''
self._picker = None
self._contains = None
self._rasterized = None
self._agg_filter = None
self._mouseover = False
self.eventson = False # fire events only if eventson
self._oid = 0 # an observer id
self._propobservers = {} # a dict from oids to funcs
try:
self.axes = None
except AttributeError:
# Handle self.axes as a read-only property, as in Figure.
pass
self._remove_method = None
self._url = None
self._gid = None
self._snap = None
self._sketch = rcParams['path.sketch']
self._path_effects = rcParams['path.effects']
self._sticky_edges = _XYPair([], [])
def __getstate__(self):
d = self.__dict__.copy()
# remove the unpicklable remove method, this will get re-added on load
# (by the axes) if the artist lives on an axes.
d['_remove_method'] = None
d['stale_callback'] = None
return d
def remove(self):
"""
Remove the artist from the figure if possible. The effect
will not be visible until the figure is redrawn, e.g., with
:meth:`matplotlib.axes.Axes.draw_idle`. Call
:meth:`matplotlib.axes.Axes.relim` to update the axes limits
if desired.
Note: :meth:`~matplotlib.axes.Axes.relim` will not see
collections even if the collection was added to axes with
*autolim* = True.
Note: there is no support for removing the artist's legend entry.
"""
# There is no method to set the callback. Instead the parent should
# set the _remove_method attribute directly. This would be a
# protected attribute if Python supported that sort of thing. The
# callback has one parameter, which is the child to be removed.
if self._remove_method is not None:
self._remove_method(self)
# clear stale callback
self.stale_callback = None
_ax_flag = False
if hasattr(self, 'axes') and self.axes:
# remove from the mouse hit list
self.axes.mouseover_set.discard(self)
# mark the axes as stale
self.axes.stale = True
# decouple the artist from the axes
self.axes = None
_ax_flag = True
if self.figure:
self.figure = None
if not _ax_flag:
self.figure = True
else:
raise NotImplementedError('cannot remove artist')
# TODO: the fix for the collections relim problem is to move the
# limits calculation into the artist itself, including the property of
# whether or not the artist should affect the limits. Then there will
# be no distinction between axes.add_line, axes.add_patch, etc.
# TODO: add legend support
def have_units(self):
'Return *True* if units are set on the *x* or *y* axes'
ax = self.axes
if ax is None or ax.xaxis is None:
return False
return ax.xaxis.have_units() or ax.yaxis.have_units()
def convert_xunits(self, x):
"""For artists in an axes, if the xaxis has units support,
convert *x* using xaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.xaxis is None:
return x
return ax.xaxis.convert_units(x)
def convert_yunits(self, y):
"""For artists in an axes, if the yaxis has units support,
convert *y* using yaxis unit type
"""
ax = getattr(self, 'axes', None)
if ax is None or ax.yaxis is None:
return y
return ax.yaxis.convert_units(y)
@property
def axes(self):
"""
The :class:`~matplotlib.axes.Axes` instance the artist
resides in, or *None*.
"""
return self._axes
@axes.setter
def axes(self, new_axes):
if (new_axes is not None and self._axes is not None
and new_axes != self._axes):
raise ValueError("Can not reset the axes. You are probably "
"trying to re-use an artist in more than one "
"Axes which is not supported")
self._axes = new_axes
if new_axes is not None and new_axes is not self:
self.stale_callback = _stale_axes_callback
return new_axes
@property
def stale(self):
"""
If the artist is 'stale' and needs to be re-drawn for the output to
match the internal state of the artist.
"""
return self._stale
@stale.setter
def stale(self, val):
self._stale = val
# if the artist is animated it does not take normal part in the
# draw stack and is not expected to be drawn as part of the normal
# draw loop (when not saving) so do not propagate this change
if self.get_animated():
return
if val and self.stale_callback is not None:
self.stale_callback(self, val)
def get_window_extent(self, renderer):
"""
Get the axes bounding box in display space.
Subclasses should override for inclusion in the bounding box
"tight" calculation. Default is to return an empty bounding
box at 0, 0.
Be careful when using this function, the results will not update
if the window extent of the artist changes. The extent
can change due to any changes in the transform stack, such as
changing the axes limits, the figure size, or the canvas used
(as is done when saving a figure). This can lead to unexpected
behavior where interactive figures will look fine on the screen,
but will save incorrectly.
"""
return Bbox([[0, 0], [0, 0]])
def add_callback(self, func):
"""
Adds a callback function that will be called whenever one of
the :class:`Artist`'s properties changes.
Returns an *id* that is useful for removing the callback with
:meth:`remove_callback` later.
"""
oid = self._oid
self._propobservers[oid] = func
self._oid += 1
return oid
def remove_callback(self, oid):
"""
Remove a callback based on its *id*.
.. seealso::
:meth:`add_callback`
For adding callbacks
"""
try:
del self._propobservers[oid]
except KeyError:
pass
def pchanged(self):
"""
Fire an event when property changed, calling all of the
registered callbacks.
"""
for oid, func in six.iteritems(self._propobservers):
func(self)
def is_transform_set(self):
"""
Returns *True* if :class:`Artist` has a transform explicitly
set.
"""
return self._transformSet
def set_transform(self, t):
"""
Set the :class:`~matplotlib.transforms.Transform` instance
used by this artist.
ACCEPTS: :class:`~matplotlib.transforms.Transform` instance
"""
self._transform = t
self._transformSet = True
self.pchanged()
self.stale = True
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform`
instance used by this artist.
"""
if self._transform is None:
self._transform = IdentityTransform()
elif (not isinstance(self._transform, Transform)
and hasattr(self._transform, '_as_mpl_transform')):
self._transform = self._transform._as_mpl_transform(self.axes)
return self._transform
def hitlist(self, event):
"""
List the children of the artist which contain the mouse event *event*.
"""
L = []
try:
hascursor, info = self.contains(event)
if hascursor:
L.append(self)
except:
import traceback
traceback.print_exc()
print("while checking", self.__class__)
for a in self.get_children():
L.extend(a.hitlist(event))
return L
def get_children(self):
"""
Return a list of the child :class:`Artist`s this
:class:`Artist` contains.
"""
return []
def contains(self, mouseevent):
"""Test whether the artist contains the mouse event.
Returns the truth value and a dictionary of artist specific details of
selection, such as which points are contained in the pick radius. See
individual artists for details.
"""
if callable(self._contains):
return self._contains(self, mouseevent)
warnings.warn("'%s' needs 'contains' method" % self.__class__.__name__)
return False, {}
def set_contains(self, picker):
"""
Replace the contains test used by this artist. The new picker
should be a callable function which determines whether the
artist is hit by the mouse event::
hit, props = picker(artist, mouseevent)
If the mouse event is over the artist, return *hit* = *True*
and *props* is a dictionary of properties you want returned
with the contains test.
ACCEPTS: a callable function
"""
self._contains = picker
def get_contains(self):
"""
Return the _contains test used by the artist, or *None* for default.
"""
return self._contains
def pickable(self):
'Return *True* if :class:`Artist` is pickable.'
return (self.figure is not None and
self.figure.canvas is not None and
self._picker is not None)
def pick(self, mouseevent):
"""
Process pick event
each child artist will fire a pick event if *mouseevent* is over
the artist and the artist has picker set
"""
# Pick self
if self.pickable():
picker = self.get_picker()
if callable(picker):
inside, prop = picker(self, mouseevent)
else:
inside, prop = self.contains(mouseevent)
if inside:
self.figure.canvas.pick_event(mouseevent, self, **prop)
# Pick children
for a in self.get_children():
# make sure the event happened in the same axes
ax = getattr(a, 'axes', None)
if (mouseevent.inaxes is None or ax is None
or mouseevent.inaxes == ax):
# we need to check if mouseevent.inaxes is None
# because some objects associated with an axes (e.g., a
# tick label) can be outside the bounding box of the
# axes and inaxes will be None
# also check that ax is None so that it traverse objects
# which do no have an axes property but children might
a.pick(mouseevent)
def set_picker(self, picker):
"""
Set the epsilon for picking used by this artist
*picker* can be one of the following:
* *None*: picking is disabled for this artist (default)
* A boolean: if *True* then picking will be enabled and the
artist will fire a pick event if the mouse event is over
the artist
* A float: if picker is a number it is interpreted as an
epsilon tolerance in points and the artist will fire
off an event if it's data is within epsilon of the mouse
event. For some artists like lines and patch collections,
the artist may provide additional data to the pick event
that is generated, e.g., the indices of the data within
epsilon of the pick event
* A function: if picker is callable, it is a user supplied
function which determines whether the artist is hit by the
mouse event::
hit, props = picker(artist, mouseevent)
to determine the hit test. if the mouse event is over the
artist, return *hit=True* and props is a dictionary of
properties you want added to the PickEvent attributes.
ACCEPTS: [None|float|boolean|callable]
"""
self._picker = picker
def get_picker(self):
'Return the picker object used by this artist'
return self._picker
def is_figure_set(self):
"""
Returns True if the artist is assigned to a
:class:`~matplotlib.figure.Figure`.
"""
return self.figure is not None
def get_url(self):
"""
Returns the url
"""
return self._url
def set_url(self, url):
"""
Sets the url for the artist
ACCEPTS: a url string
"""
self._url = url
def get_gid(self):
"""
Returns the group id
"""
return self._gid
def set_gid(self, gid):
"""
Sets the (group) id for the artist
ACCEPTS: an id string
"""
self._gid = gid
def get_snap(self):
"""
Returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg and MacOSX backends.
"""
if rcParams['path.snap']:
return self._snap
else:
return False
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
Only supported by the Agg and MacOSX backends.
"""
self._snap = snap
self.stale = True
def get_sketch_params(self):
"""
Returns the sketch parameters for the artist.
Returns
-------
sketch_params : tuple or `None`
A 3-tuple with the following elements:
* `scale`: The amplitude of the wiggle perpendicular to the
source line.
* `length`: The length of the wiggle along the line.
* `randomness`: The scale factor by which the length is
shrunken or expanded.
May return `None` if no sketch parameters were set.
"""
return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
"""
Sets the sketch parameters.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source
line, in pixels. If scale is `None`, or not provided, no
sketch filter will be provided.
length : float, optional
The length of the wiggle along the line, in pixels
(default 128.0)
randomness : float, optional
The scale factor by which the length is shrunken or
expanded (default 16.0)
"""
if scale is None:
self._sketch = None
else:
self._sketch = (scale, length or 128.0, randomness or 16.0)
self.stale = True
def set_path_effects(self, path_effects):
"""
set path_effects, which should be a list of instances of
matplotlib.patheffect._Base class or its derivatives.
"""
self._path_effects = path_effects
self.stale = True
def get_path_effects(self):
return self._path_effects
def get_figure(self):
"""
Return the :class:`~matplotlib.figure.Figure` instance the
artist belongs to.
"""
return self.figure
def set_figure(self, fig):
"""
Set the :class:`~matplotlib.figure.Figure` instance the artist
belongs to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
# if this is a no-op just return
if self.figure is fig:
return
# if we currently have a figure (the case of both `self.figure`
# and `fig` being none is taken care of above), then the user is
# trying to change the figure that an artist is associated with, which
# is not allowed for the same reason as adding the same instance
# to more than one Axes
if self.figure is not None:
raise RuntimeError("Can not put single artist in "
"more than one figure")
self.figure = fig
if self.figure and self.figure is not self:
self.pchanged()
self.stale = True
def set_clip_box(self, clipbox):
"""
Set the artist's clip :class:`~matplotlib.transforms.Bbox`.
ACCEPTS: a :class:`matplotlib.transforms.Bbox` instance
"""
self.clipbox = clipbox
self.pchanged()
self.stale = True
def set_clip_path(self, path, transform=None):
"""
Set the artist's clip path, which may be:
- a :class:`~matplotlib.patches.Patch` (or subclass) instance; or
- a :class:`~matplotlib.path.Path` instance, in which case a
:class:`~matplotlib.transforms.Transform` instance, which will be
applied to the path before using it for clipping, must be provided;
or
- ``None``, to remove a previously set clipping path.
For efficiency, if the path happens to be an axis-aligned rectangle,
this method will set the clipping box to the corresponding rectangle
and set the clipping path to ``None``.
ACCEPTS: [ (:class:`~matplotlib.path.Path`,
:class:`~matplotlib.transforms.Transform`) |
:class:`~matplotlib.patches.Patch` | None ]
"""
from matplotlib.patches import Patch, Rectangle
success = False
if transform is None:
if isinstance(path, Rectangle):
self.clipbox = TransformedBbox(Bbox.unit(),
path.get_transform())
self._clippath = None
success = True
elif isinstance(path, Patch):
self._clippath = TransformedPatchPath(path)
success = True
elif isinstance(path, tuple):
path, transform = path
if path is None:
self._clippath = None
success = True
elif isinstance(path, Path):
self._clippath = TransformedPath(path, transform)
success = True
elif isinstance(path, TransformedPatchPath):
self._clippath = path
success = True
elif isinstance(path, TransformedPath):
self._clippath = path
success = True
if not success:
raise TypeError(
"Invalid arguments to set_clip_path, of type {} and {}"
.format(type(path).__name__, type(transform).__name__))
# This may result in the callbacks being hit twice, but guarantees they
# will be hit at least once.
self.pchanged()
self.stale = True
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on all
backends
"""
return self._alpha
def get_visible(self):
"Return the artist's visiblity"
return self._visible
def get_animated(self):
"Return the artist's animated state"
return self._animated
def get_clip_on(self):
'Return whether artist uses clipping'
return self._clipon
def get_clip_box(self):
'Return artist clipbox'
return self.clipbox
def get_clip_path(self):
'Return artist clip path'
return self._clippath
def get_transformed_clip_path_and_affine(self):
'''
Return the clip path with the non-affine part of its
transformation applied, and the remaining affine part of its
transformation.
'''
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def set_clip_on(self, b):
"""
Set whether artist uses clipping.
When False, artists will be visible outside of the axes, which
can lead to unexpected results.
ACCEPTS: [True | False]
"""
self._clipon = b
# This may result in the callbacks being hit twice, but ensures they
# are hit at least once
self.pchanged()
self.stale = True
def _set_gc_clip(self, gc):
'Set the clip properly for the gc'
if self._clipon:
if self.clipbox is not None:
gc.set_clip_rectangle(self.clipbox)
gc.set_clip_path(self._clippath)
else:
gc.set_clip_rectangle(None)
gc.set_clip_path(None)
def get_rasterized(self):
"return True if the artist is to be rasterized"
return self._rasterized
def set_rasterized(self, rasterized):
"""
Force rasterized (bitmap) drawing in vector backend output.
Defaults to None, which implies the backend's default behavior
ACCEPTS: [True | False | None]
"""
if rasterized and not hasattr(self.draw, "_supports_rasterization"):
warnings.warn("Rasterization of '%s' will be ignored" % self)
self._rasterized = rasterized
def get_agg_filter(self):
"return filter function to be used for agg filter"
return self._agg_filter
def set_agg_filter(self, filter_func):
"""
set agg_filter function.
"""
self._agg_filter = filter_func
self.stale = True
def draw(self, renderer, *args, **kwargs):
'Derived classes drawing method'
if not self.get_visible():
return
self.stale = False
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on
all backends.
ACCEPTS: float (0.0 transparent through 1.0 opaque)
"""
self._alpha = alpha
self.pchanged()
self.stale = True
def set_visible(self, b):
"""
Set the artist's visibility.
ACCEPTS: [True | False]
"""
self._visible = b
self.pchanged()
self.stale = True
def set_animated(self, b):
"""
Set the artist's animation state.
ACCEPTS: [True | False]
"""
if self._animated != b:
self._animated = b
self.pchanged()
def update(self, props):
"""
Update the properties of this :class:`Artist` from the
dictionary *prop*.
"""
def _update_property(self, k, v):
"""sorting out how to update property (setter or setattr)
Parameters
----------
k : str
The name of property to update
v : obj
The value to assign to the property
Returns
-------
ret : obj or None
If using a `set_*` method, return its return value, else None.
"""
k = k.lower()
# white list attributes we want to be able to update through
# art.update, art.set, setp
if k in {'axes'}:
return setattr(self, k, v)
else:
func = getattr(self, 'set_' + k, None)
if not callable(func):
raise AttributeError('Unknown property %s' % k)
return func(v)
store = self.eventson
self.eventson = False
try:
ret = [_update_property(self, k, v)
for k, v in props.items()]
finally:
self.eventson = store
if len(ret):
self.pchanged()
self.stale = True
return ret
def get_label(self):
"""
Get the label used for this artist in the legend.
"""
return self._label
def set_label(self, s):
"""
Set the label to *s* for auto legend.
ACCEPTS: string or anything printable with '%s' conversion.
"""
if s is not None:
self._label = '%s' % (s, )
else:
self._label = None
self.pchanged()
self.stale = True
def get_zorder(self):
"""
Return the :class:`Artist`'s zorder.
"""
return self.zorder
def set_zorder(self, level):
"""
Set the zorder for the artist. Artists with lower zorder
values are drawn first.
ACCEPTS: any number
"""
self.zorder = level
self.pchanged()
self.stale = True
@property
def sticky_edges(self):
"""
`x` and `y` sticky edge lists.
When performing autoscaling, if a data limit coincides with a value in
the corresponding sticky_edges list, then no margin will be added--the
view limit "sticks" to the edge. A typical usecase is histograms,
where one usually expects no margin on the bottom edge (0) of the
histogram.
This attribute cannot be assigned to; however, the `x` and `y` lists
can be modified in place as needed.
Examples
--------
>>> artist.sticky_edges.x[:] = (xmin, xmax)
>>> artist.sticky_edges.y[:] = (ymin, ymax)
"""
return self._sticky_edges
def update_from(self, other):
'Copy properties from *other* to *self*.'
self._transform = other._transform
self._transformSet = other._transformSet
self._visible = other._visible
self._alpha = other._alpha
self.clipbox = other.clipbox
self._clipon = other._clipon
self._clippath = other._clippath
self._label = other._label
self._sketch = other._sketch
self._path_effects = other._path_effects
self.sticky_edges.x[:] = other.sticky_edges.x[:]
self.sticky_edges.y[:] = other.sticky_edges.y[:]
self.pchanged()
self.stale = True
def properties(self):
"""
return a dictionary mapping property name -> value for all Artist props
"""
return ArtistInspector(self).properties()
def set(self, **kwargs):
"""A property batch setter. Pass *kwargs* to set properties.
"""
props = OrderedDict(
sorted(kwargs.items(), reverse=True,
key=lambda x: (self._prop_order.get(x[0], 0), x[0])))
return self.update(props)
def findobj(self, match=None, include_self=True):
"""
Find artist objects.
Recursively find all :class:`~matplotlib.artist.Artist` instances
contained in self.
*match* can be
- None: return all objects contained in artist.
- function with signature ``boolean = match(artist)``
used to filter matches
- class instance: e.g., Line2D. Only return artists of class type.
If *include_self* is True (default), include self in the list to be
checked for a match.
"""
if match is None: # always return True
def matchfunc(x):
return True
elif isinstance(match, type) and issubclass(match, Artist):
def matchfunc(x):
return isinstance(x, match)
elif callable(match):
matchfunc = match
else:
raise ValueError('match must be None, a matplotlib.artist.Artist '
'subclass, or a callable')
artists = sum([c.findobj(matchfunc) for c in self.get_children()], [])
if include_self and matchfunc(self):
artists.append(self)
return artists
def get_cursor_data(self, event):
"""
Get the cursor data for a given event.
"""
return None
def format_cursor_data(self, data):
"""
Return *cursor data* string formatted.
"""
try:
data[0]
except (TypeError, IndexError):
data = [data]
return ', '.join('{:0.3g}'.format(item) for item in data if
isinstance(item, (np.floating, np.integer, int, float)))
@property
def mouseover(self):
return self._mouseover
@mouseover.setter
def mouseover(self, val):
val = bool(val)
self._mouseover = val
ax = self.axes
if ax:
if val:
ax.mouseover_set.add(self)
else:
ax.mouseover_set.discard(self)
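# --- Illustrative sketch (not in the original module) ------------------------
# A hedged example of how the property machinery above is typically used:
# Artist.set() batches keyword properties through update(), and findobj()
# walks the artist tree. The helper name below is ours, purely illustrative,
# and is never called at import time; it assumes matplotlib is installed.
def _example_artist_properties():  # pragma: no cover - illustrative only
    import matplotlib.pyplot as plt
    from matplotlib.lines import Line2D
    fig, ax = plt.subplots()
    line, = ax.plot([0, 1, 2], [0, 1, 4], label='demo')
    # Batch-set properties; each keyword is routed to the matching set_* method.
    line.set(alpha=0.5, zorder=3, visible=True)
    # Recursively collect every Line2D artist contained in the figure.
    return fig.findobj(Line2D)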
class ArtistInspector(object):
"""
A helper class to inspect an :class:`~matplotlib.artist.Artist`
and return information about it's settable properties and their
current values.
"""
def __init__(self, o):
"""
Initialize the artist inspector with an
:class:`~matplotlib.artist.Artist` or iterable of :class:`Artists`.
If an iterable is used, we assume it is a homogeneous sequence (all
:class:`Artists` are of the same type) and it is your responsibility
to make sure this is so.
"""
if cbook.iterable(o):
# Wrapped in list instead of doing try-except around next(iter(o))
o = list(o)
if len(o):
o = o[0]
self.oorig = o
if not inspect.isclass(o):
o = type(o)
self.o = o
self.aliasd = self.get_aliases()
def get_aliases(self):
"""
Get a dict mapping *fullname* -> *alias* for each *alias* in
the :class:`~matplotlib.artist.ArtistInspector`.
e.g., for lines::
{'markerfacecolor': 'mfc',
'linewidth' : 'lw',
}
"""
names = [name for name in dir(self.o)
if name.startswith(('set_', 'get_'))
and callable(getattr(self.o, name))]
aliases = {}
for name in names:
func = getattr(self.o, name)
if not self.is_alias(func):
continue
docstring = func.__doc__
fullname = docstring[10:]
aliases.setdefault(fullname[4:], {})[name[4:]] = None
return aliases
_get_valid_values_regex = re.compile(
r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))"
)
def get_valid_values(self, attr):
"""
Get the legal arguments for the setter associated with *attr*.
This is done by querying the docstring of the function *set_attr*
for a line that begins with ACCEPTS:
e.g., for a line linestyle, return
"[ ``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'steps'`` | ``'None'``
]"
"""
name = 'set_%s' % attr
if not hasattr(self.o, name):
raise AttributeError('%s has no function %s' % (self.o, name))
func = getattr(self.o, name)
docstring = func.__doc__
if docstring is None:
return 'unknown'
if docstring.startswith('alias for '):
return None
match = self._get_valid_values_regex.search(docstring)
if match is not None:
return re.sub("\n *", " ", match.group(1))
return 'unknown'
def _get_setters_and_targets(self):
"""
Get the attribute strings and a full path to where the setter
is defined for all setters in an object.
"""
setters = []
for name in dir(self.o):
if not name.startswith('set_'):
continue
func = getattr(self.o, name)
if not callable(func):
continue
if six.PY2:
nargs = len(inspect.getargspec(func)[0])
else:
nargs = len(inspect.getfullargspec(func)[0])
if nargs < 2 or self.is_alias(func):
continue
source_class = self.o.__module__ + "." + self.o.__name__
for cls in self.o.mro():
if name in cls.__dict__:
source_class = cls.__module__ + "." + cls.__name__
break
setters.append((name[4:], source_class + "." + name))
return setters
def get_setters(self):
"""
Get the attribute strings with setters for object. e.g., for a line,
return ``['markerfacecolor', 'linewidth', ....]``.
"""
return [prop for prop, target in self._get_setters_and_targets()]
def is_alias(self, o):
"""
Return *True* if method object *o* is an alias for another
function.
"""
ds = o.__doc__
if ds is None:
return False
return ds.startswith('alias for ')
def aliased_name(self, s):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME.
e.g., for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
return s + ''.join([' or %s' % x
for x in sorted(self.aliasd[s])])
else:
return s
def aliased_name_rest(self, s, target):
"""
return 'PROPNAME or alias' if *s* has an alias, else return
PROPNAME formatted for ReST
e.g., for the line markerfacecolor property, which has an
alias, return 'markerfacecolor or mfc' and for the transform
property, which does not, return 'transform'
"""
if s in self.aliasd:
aliases = ''.join([' or %s' % x
for x in sorted(self.aliasd[s])])
else:
aliases = ''
return ':meth:`%s <%s>`%s' % (s, target, aliases)
def pprint_setters(self, prop=None, leadingspace=2):
"""
If *prop* is *None*, return a list of strings of all settable properties
and their valid values.
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' % (pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
for prop, path in attrs:
accepts = self.get_valid_values(prop)
name = self.aliased_name(prop)
lines.append('%s%s: %s' % (pad, name, accepts))
return lines
def pprint_setters_rest(self, prop=None, leadingspace=2):
"""
If *prop* is *None*, return a list of strings of all settable properties
and their valid values. Format the output for ReST
If *prop* is not *None*, it is a valid property name and that
property will be returned as a string of property : valid
values.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
if prop is not None:
accepts = self.get_valid_values(prop)
return '%s%s: %s' % (pad, prop, accepts)
attrs = self._get_setters_and_targets()
attrs.sort()
lines = []
########
names = [self.aliased_name_rest(prop, target)
for prop, target in attrs]
accepts = [self.get_valid_values(prop) for prop, target in attrs]
col0_len = max(len(n) for n in names)
col1_len = max(len(a) for a in accepts)
table_formatstr = pad + '=' * col0_len + ' ' + '=' * col1_len
lines.append('')
lines.append(table_formatstr)
lines.append(pad + 'Property'.ljust(col0_len + 3) +
'Description'.ljust(col1_len))
lines.append(table_formatstr)
lines.extend([pad + n.ljust(col0_len + 3) + a.ljust(col1_len)
for n, a in zip(names, accepts)])
lines.append(table_formatstr)
lines.append('')
return lines
def properties(self):
"""
return a dictionary mapping property name -> value
"""
o = self.oorig
getters = [name for name in dir(o)
if name.startswith('get_') and callable(getattr(o, name))]
getters.sort()
d = dict()
for name in getters:
func = getattr(o, name)
if self.is_alias(func):
continue
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
val = func()
except:
continue
else:
d[name[4:]] = val
return d
def pprint_getters(self):
"""
Return the getters and actual values as list of strings.
"""
lines = []
for name, val in sorted(six.iteritems(self.properties())):
if getattr(val, 'shape', ()) != () and len(val) > 6:
s = str(val[:6]) + '...'
else:
s = str(val)
s = s.replace('\n', ' ')
if len(s) > 50:
s = s[:50] + '...'
name = self.aliased_name(name)
lines.append(' %s = %s' % (name, s))
return lines
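# --- Illustrative sketch (not in the original module) ------------------------
# A hedged example of ArtistInspector introspection: it relies on the ACCEPTS
# lines and 'alias for ...' docstrings handled above to report settable
# properties, their aliases and their valid values. The helper name is ours
# and is never executed at import time.
def _example_artist_inspector():  # pragma: no cover - illustrative only
    from matplotlib.lines import Line2D
    insp = ArtistInspector(Line2D([0, 1], [0, 1]))
    aliases = insp.get_aliases()        # e.g. {'linewidth': {'lw': None}, ...}
    setters = insp.get_setters()        # e.g. ['alpha', 'color', 'linewidth', ...]
    valid = insp.get_valid_values('linestyle')
    return aliases, setters, valid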
def getp(obj, property=None):
"""
Return the value of an object's property. *property* is an optional string
naming the property you want to return.
Example usage::
getp(obj) # get all the object properties
getp(obj, 'linestyle') # get the linestyle property
*obj* is a :class:`Artist` instance, e.g.,
:class:`~matplotlib.lines.Line2D` or an instance of a
:class:`~matplotlib.axes.Axes` or :class:`matplotlib.text.Text`.
If the *property* is 'somename', this function returns
obj.get_somename()
:func:`getp` can be used to query all the gettable properties with
``getp(obj)``. Many properties have aliases for shorter typing, e.g.
'lw' is an alias for 'linewidth'. In the output, aliases and full
property names will be listed as:
property or alias = value
e.g.:
linewidth or lw = 2
"""
if property is None:
insp = ArtistInspector(obj)
ret = insp.pprint_getters()
print('\n'.join(ret))
return
func = getattr(obj, 'get_' + property)
return func()
# alias
get = getp
def setp(obj, *args, **kwargs):
"""
Set a property on an artist object.
matplotlib supports the use of :func:`setp` ("set property") and
:func:`getp` to set and get object properties, as well as to do
introspection on the object. For example, to set the linestyle of a
line to be dashed, you can do::
>>> line, = plot([1,2,3])
>>> setp(line, linestyle='--')
If you want to know the valid types of arguments, you can provide
the name of the property you want to set without a value::
>>> setp(line, 'linestyle')
linestyle: [ '-' | '--' | '-.' | ':' | 'steps' | 'None' ]
If you want to see all the properties that can be set, and their
possible values, you can do::
>>> setp(line)
... long output listing omitted
You may specify another output file to `setp` using the `file`
keyword-only argument if `sys.stdout` is not acceptable for some reason::
>>> with open('output.log', 'w') as f:
...     setp(line, file=f)
:func:`setp` operates on a single instance or an iterable of
instances. If you are in query mode introspecting the possible
values, only the first instance in the sequence is used. When
actually setting values, all the instances will be set. e.g.,
suppose you have a list of two lines, the following will make both
lines thicker and red::
>>> x = arange(0,1.0,0.01)
>>> y1 = sin(2*pi*x)
>>> y2 = sin(4*pi*x)
>>> lines = plot(x, y1, x, y2)
>>> setp(lines, linewidth=2, color='r')
:func:`setp` works with the MATLAB style string/value pairs or
with python kwargs. For example, the following are equivalent::
>>> setp(lines, 'linewidth', 2, 'color', 'r') # MATLAB style
>>> setp(lines, linewidth=2, color='r') # python style
"""
if not cbook.iterable(obj):
objs = [obj]
else:
objs = list(cbook.flatten(obj))
if not objs:
return
insp = ArtistInspector(objs[0])
# file has to be popped before checking if kwargs is empty
printArgs = {}
if 'file' in kwargs:
printArgs['file'] = kwargs.pop('file')
if not kwargs and len(args) < 2:
if args:
print(insp.pprint_setters(prop=args[0]), **printArgs)
else:
print('\n'.join(insp.pprint_setters()), **printArgs)
return
if len(args) % 2:
raise ValueError('The set args must be string, value pairs')
# put args into ordereddict to maintain order
funcvals = OrderedDict()
for i in range(0, len(args) - 1, 2):
funcvals[args[i]] = args[i + 1]
ret = [o.update(funcvals) for o in objs]
ret.extend([o.set(**kwargs) for o in objs])
return [x for x in cbook.flatten(ret)]
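# --- Illustrative sketch (not in the original module) ------------------------
# A hedged usage example for getp()/setp() mirroring the docstrings above:
# query mode with a bare property name, then batch-setting on an iterable of
# artists. The helper name is ours and is never executed at import time.
def _example_getp_setp():  # pragma: no cover - illustrative only
    import matplotlib.pyplot as plt
    lines = plt.plot([0, 1], [0, 1], [0, 1], [1, 0])
    setp(lines, 'linestyle')                    # print the valid linestyle values
    setp(lines, linewidth=2, color='r')         # python-style kwargs on all lines
    setp(lines, 'linewidth', 2, 'color', 'r')   # MATLAB-style string/value pairs
    return getp(lines[0], 'linewidth')          # query a single property -> 2.0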
def kwdoc(a):
hardcopy = matplotlib.rcParams['docstring.hardcopy']
if hardcopy:
return '\n'.join(ArtistInspector(a).pprint_setters_rest(
leadingspace=2))
else:
return '\n'.join(ArtistInspector(a).pprint_setters(leadingspace=2))
docstring.interpd.update(Artist=kwdoc(Artist))
_get_axes_msg = """{0} has been deprecated in mpl 1.5, please use the
axes property. A removal date has not been set."""
| mit |
jakobworldpeace/scikit-learn | sklearn/linear_model/coordinate_descent.py | 1 | 80266 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Gael Varoquaux <[email protected]>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import _preprocess_data
from ..utils import check_array, check_X_y
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..exceptions import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 < l1_ratio <= 1``.
For ``l1_ratio = 0`` the penalty is an L2 penalty (currently not
supported). For ``l1_ratio = 1`` it is an L1 penalty. For
``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
if l1_ratio == 0:
raise ValueError("Automatic alpha grid generation is not supported for"
" l1_ratio=0. Please supply a grid by providing "
"your estimator with the appropriate `alphas=` "
"argument.")
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize,
return_mean=True)
mean_dot = X_offset * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_scale[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
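# --- Illustrative sketch (not in the original module) ------------------------
# A hedged, NumPy-only restatement of the grid computed above for the dense,
# already-centered, single-output case: alpha_max is max |X^T y| divided by
# (n_samples * l1_ratio), and the grid is n_alphas points log-spaced from
# alpha_max down to eps * alpha_max. The helper name is ours.
def _example_alpha_grid():  # pragma: no cover - illustrative only
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20)
    Xy = np.dot(X.T, y)
    alpha_max = np.abs(Xy).max() / (X.shape[0] * 1.0)   # l1_ratio = 1 (lasso)
    alphas = np.logspace(np.log10(alpha_max * 1e-3), np.log10(alpha_max),
                         num=100)[::-1]                 # descending, like above
    return alphas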
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
Note that in certain cases, the Lars solver may be significantly
faster at computing this path. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path.
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, return_n_iter=return_n_iter, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False,
check_input=True, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
check_input : bool, default True
If set to False, skip input validation checks, including on the Gram
matrix when provided, assuming they are handled by the caller.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py for an
example.
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already Fortran ordered when bypassing
# checks
if check_input:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
# Xy should be a 1d contiguous array or a 2D C ordered array
Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_offset' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_offset'] / params['X_scale']
X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
else:
X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if check_input:
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False, copy=False)
if alphas is None:
# No need to normalize or fit_intercept: it has been done
# above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=X.dtype)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1], dtype=X.dtype))
else:
coef_ = np.asfortranarray(coef_init, dtype=X.dtype)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, dtype=X.dtype.type,
order='C')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like. Got %r" % precompute)
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations.' +
' Fitting data with very small alpha' +
' may cause precision problems.',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
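# --- Illustrative sketch (not in the original module) ------------------------
# A hedged example of calling enet_path() directly on a tiny dense problem,
# mirroring the signature documented above: alphas come back in descending
# order and coefs has shape (n_features, n_alphas). The helper name is ours
# and is never executed at import time.
def _example_enet_path():  # pragma: no cover - illustrative only
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.asfortranarray(rng.randn(30, 4))
    y = rng.randn(30)
    alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=10)
    assert coefs.shape == (4, 10) and alphas[0] > alphas[-1]
    return alphas, coefs, dual_gaps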
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the penalty terms. Defaults to 1.0.
See the notes for the exact mathematical meaning of this
parameter.``alpha = 0`` is equivalent to an ordinary least square,
solved by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : True | False | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. The Gram matrix can also be passed as argument.
For sparse input this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if isinstance(self.precompute, six.string_types):
raise ValueError('precompute should be one of True, False or'
' array-like. Got %r' % self.precompute)
# We expect X and y to be float64 or float32 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc',
order='F', dtype=[np.float64, np.float32],
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
y = check_array(y, order='F', copy=False, dtype=X.dtype.type,
ensure_2d=False)
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or not hasattr(self, "coef_"):
coef_ = np.zeros((n_targets, n_features), dtype=X.dtype,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
self.n_iter_ = []
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_offset=X_offset, X_scale=X_scale, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_offset, y_offset, X_scale)
# workaround since _set_intercept will cast self.coef_ into X.dtype
self.coef_ = np.asarray(self.coef_, dtype=X.dtype)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted ``coef_`` """
return sparse.csr_matrix(self.coef_)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
else:
return super(ElasticNet, self)._decision_function(X)
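# --- Illustrative sketch (not in the original module) ------------------------
# A hedged fit/predict example for the ElasticNet estimator defined above on
# synthetic data; the attributes used (coef_, intercept_) are the ones
# documented in the class docstring. The helper name is ours and is never
# executed at import time.
def _example_elastic_net():  # pragma: no cover - illustrative only
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = X.dot([1.0, 0.0, -2.0]) + 0.01 * rng.randn(50)
    model = ElasticNet(alpha=0.1, l1_ratio=0.7).fit(X, y)
    return model.coef_, model.intercept_, model.predict(X[:2])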
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | array-like, default=False
Whether to use a precomputed Gram matrix to speed up
calculations. The Gram matrix can also be passed as argument.
For sparse input
this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_offset'] = X_offset
path_params['X_scale'] = X_scale
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_offset = np.atleast_1d(y_offset)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_scale)
coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Work around for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
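# --- Illustrative sketch (not in the original module) ------------------------
# A hedged, NumPy-only restatement of the per-alpha test error computed above
# for the single-output case: predictions on the held-out fold are
# X_test @ coefs plus the recovered intercepts, and the score is the mean
# squared residual for each alpha. The helper name is ours.
def _example_path_mse():  # pragma: no cover - illustrative only
    import numpy as np
    rng = np.random.RandomState(0)
    X_test = rng.randn(10, 3)
    y_test = rng.randn(10)
    coefs = rng.randn(3, 5)                     # (n_features, n_alphas)
    intercepts = rng.randn(5)                   # one intercept per alpha
    preds = X_test.dot(coefs) + intercepts      # (n_samples, n_alphas)
    mses = ((preds - y_test[:, np.newaxis]) ** 2).mean(axis=0)
    return mses                                 # one MSE per alpha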
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = check_array(y, copy=False, dtype=[np.float64, np.float32],
ensure_2d=False)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Dealing right with copy_X is important in the following:
# Multiple functions touch X and subsamples of X and can induce a
# lot of duplication of memory
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv.split(X))
best_mse = np.inf
# We do a double for loop folded in one, in order to be able to
# iterate in parallel on l1_ratio and folds
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=X.dtype.type)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/plot_lasso_model_selection.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
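# --- Illustrative sketch (not in the original module) ------------------------
# A hedged example of LassoCV selecting alpha by cross-validation on synthetic
# data, using the attributes documented above (alpha_, mse_path_). The helper
# name is ours and is never executed at import time.
def _example_lasso_cv():  # pragma: no cover - illustrative only
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(60, 5)
    y = X.dot([2.0, 0.0, 0.0, -1.0, 0.0]) + 0.1 * rng.randn(60)
    reg = LassoCV(cv=3, n_alphas=30).fit(X, y)
    return reg.alpha_, reg.mse_path_.shape      # mse_path_ is (n_alphas, n_folds)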
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float or array of floats, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
    intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/plot_lasso_model_selection.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
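    Examples
    --------
    A minimal usage sketch on synthetic data (illustrative only; the data,
    variable names and l1_ratio grid below are assumptions added here for
    documentation, not part of the original source):
    >>> import numpy as np
    >>> from sklearn.linear_model import ElasticNetCV
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(50, 5)
    >>> y = X[:, 0] + 0.1 * rng.randn(50)
    >>> reg = ElasticNetCV(l1_ratio=[.1, .5, .9], cv=3)
    >>> reg = reg.fit(X, y)  # alpha_ and l1_ratio_ are then chosen by CV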
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_elastic_net>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array.
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskElasticNet model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
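        A minimal sketch of pre-allocating Fortran-ordered input (the array
        shape below is an illustrative assumption, not a requirement):
        >>> import numpy as np
        >>> X = np.asfortranarray(np.random.randn(10, 4))
        >>> X.flags['F_CONTIGUOUS']
        True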
"""
X = check_array(X, dtype=[np.float64, np.float32], order='F',
copy=self.copy_X and self.fit_intercept)
y = check_array(y, dtype=X.dtype.type, ensure_2d=False)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=X.dtype.type,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_offset, y_offset, X_scale)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations',
ConvergenceWarning)
# return self for chaining fit and predict calls
return self
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
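    Examples
    --------
    A minimal usage sketch on synthetic multi-output data (illustrative
    only; the data and variable names below are assumptions added here
    for documentation):
    >>> import numpy as np
    >>> from sklearn.linear_model import MultiTaskLassoCV
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(40, 5)
    >>> Y = np.dot(X, rng.randn(5, 3))  # three tasks sharing the same features
    >>> reg = MultiTaskLassoCV(cv=3)
    >>> reg = reg.fit(X, Y)
    >>> reg.coef_.shape
    (3, 5)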
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
| bsd-3-clause |
weinbe58/QuSpin | examples/scripts/example7.py | 3 | 6214 | from __future__ import print_function, division
import sys,os
# line 4 and line 5 below are for development purposes and can be removed
qspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,qspin_path)
#####################################################################
# example 7 #
# In this script we demonstrate how to use QuSpin to create #
# a ladder geometry and study quench dynamics in the Bose-Hubbard #
# model. We also show how to compute the entanglement entropy of #
# bosonic systems. Last, we demonstrate how to use the block_ops #
# tool to decompose a state in the symmetry sectors of a #
# Hamiltonian, evolve the separate parts, and put back the state #
# in the end. #
#####################################################################
from quspin.operators import hamiltonian # Hamiltonians and operators
from quspin.basis import boson_basis_1d # bosonic Hilbert space
from quspin.tools.block_tools import block_ops # dynamics in symmetry blocks
import numpy as np # general math functions
import matplotlib.pyplot as plt # plotting library
import matplotlib.animation as animation # animating movie of dynamics
#
##### define model parameters
# initial seed for random number generator
np.random.seed(0) # seed is 0 to produce plots from QuSpin2 paper
# setting up parameters of simulation
L = 6 # length of chain
N = 2*L # number of sites
nb = 0.5 # density of bosons
sps = 3 # number of states per site
J_par_1 = 1.0 # top side of ladder hopping
J_par_2 = 1.0 # bottom side of ladder hopping
J_perp = 0.5 # rung hopping
U = 20.0 # Hubbard interaction
#
##### set up Hamiltonian and observables
# define site-coupling lists
int_list_1 = [[-0.5*U,i] for i in range(N)] # interaction: $-U/2 \sum_i n_i$
int_list_2 = [[0.5*U,i,i] for i in range(N)] # interaction: $U/2 \sum_i n_i^2$
# setting up hopping lists
hop_list = [[-J_par_1,i,(i+2)%N] for i in range(0,N,2)] # PBC bottom leg
hop_list.extend([[-J_par_2,i,(i+2)%N] for i in range(1,N,2)]) # PBC top leg
hop_list.extend([[-J_perp,i,i+1] for i in range(0,N,2)]) # perp/rung hopping
hop_list_hc = [[J.conjugate(),i,j] for J,i,j in hop_list] # add h.c. terms
# set up static and dynamic lists
static = [
["+-",hop_list], # hopping
["-+",hop_list_hc], # hopping h.c.
["nn",int_list_2], # U n_i^2
["n",int_list_1] # -U n_i
]
dynamic = [] # no dynamic operators
# create block_ops object
blocks=[dict(kblock=kblock) for kblock in range(L)] # blocks to project on to
basis_args = (N,) # boson_basis_1d mandatory arguments
basis_kwargs = dict(nb=nb,sps=sps,a=2) # boson_basis_1d optional args
get_proj_kwargs = dict(pcon=True) # set projection to full particle basis
H_block = block_ops(blocks,static,dynamic,boson_basis_1d,basis_args,np.complex128,
basis_kwargs=basis_kwargs,get_proj_kwargs=get_proj_kwargs)
# setting up local Fock basis
basis = boson_basis_1d(N,nb=nb,sps=sps)
# setting up observables
no_checks = dict(check_herm=False,check_symm=False,check_pcon=False)
n_list = [hamiltonian([["n",[[1.0,i]]]],[],basis=basis,dtype=np.float64,**no_checks) for i in range(N)]
##### do time evolution
# set up initial state
i0 = np.random.randint(basis.Ns) # pick random state from basis set
psi = np.zeros(basis.Ns,dtype=np.float64)
psi[i0] = 1.0
# print info about setup
state_str = "".join(str(int((basis[i0]//basis.sps**(L-i-1)))%basis.sps) for i in range(N))
print("total H-space size: {}, initial state: |{}>".format(basis.Ns,state_str))
# setting up parameters for evolution
start,stop,num = 0,30,301 # 301 time points, spaced 0.1 apart
times = np.linspace(start,stop,num)
# calculating the evolved states
n_jobs = 1 # parallelisation: increase to see if calculation runs faster!
psi_t = H_block.expm(psi,a=-1j,start=start,stop=stop,num=num,block_diag=False,n_jobs=n_jobs)
# calculating the local densities as a function of time
expt_n_t = np.vstack([n.expt_value(psi_t).real for n in n_list]).T
# reshape data for plotting
n_t = np.zeros((num,2,L))
n_t[:,0,:] = expt_n_t[:,0::2]
n_t[:,1,:] = expt_n_t[:,1::2]
# calculating entanglement entropy
sub_sys_A = range(0,N,2) # bottom side of ladder
gen = (basis.ent_entropy(psi,sub_sys_A=sub_sys_A)["Sent_A"] for psi in psi_t.T[:])
ent_t = np.fromiter(gen,dtype=np.float64,count=num)
# plotting static figures
#"""
fig, ax = plt.subplots(nrows=5,ncols=1)
im=[]
im_ind = []
for i,t in enumerate(np.logspace(-1,np.log10(stop-1),5,base=10)):
j = times.searchsorted(t)
im_ind.append(j)
im.append(ax[i].imshow(n_t[j],cmap="hot",vmax=n_t.max(),vmin=0))
ax[i].tick_params(labelbottom=False,labelleft=False)
cax = fig.add_axes([0.85, 0.1, 0.03, 0.8])
fig.colorbar(im[2],cax)
plt.savefig("boson_density.pdf")
plt.figure()
plt.plot(times,ent_t,lw=2)
plt.plot(times[im_ind],ent_t[im_ind],marker="o",linestyle="",color="red")
plt.xlabel("$Jt$",fontsize=20)
plt.ylabel("$s_\\mathrm{ent}(t)$",fontsize=20)
plt.grid()
plt.savefig("boson_entropy.pdf")
#plt.show()
plt.close()
#"""
# setting up two plots to animate side by side
fig, (ax1,ax2) = plt.subplots(1,2)
fig.set_size_inches(10, 5)
ax1.set_xlabel(r"$Jt$",fontsize=18)
ax1.set_ylabel(r"$s_\mathrm{ent}$",fontsize=18)
ax1.grid()
line1, = ax1.plot(times, ent_t, lw=2)
line1.set_data([],[])
im = ax2.matshow(n_t[0],cmap="hot")
fig.colorbar(im)
def run(i): # function to update frame
# set new data for plots
if i==num-1:
exit() # comment this line to retain last plot
line1.set_data(times[:i],ent_t[:i])
im.set_data(n_t[i])
return im, line1
# define and display animation
ani = animation.FuncAnimation(fig, run, range(num),interval=50,repeat=False)
plt.show()
plt.close()
#
"""
###### ladder lattice
# hopping coupling parameters:
# - : J_par_1
# = : J_par_2
# | : J_perp
#
# lattice graph
#
= 1 = 3 = 5 = 7 = 9 =
| | | | |
- 0 - 2 - 4 - 6 - 8 -
#
# translations along leg-direction (i -> i+2):
#
= 9 = 1 = 3 = 5 = 7 =
| | | | |
- 8 - 0 - 2 - 4 - 6 -
#
# if J_par_1=J_par_2, one can use regular chain parity (i -> N - i) as combination
# of the two ladder parity operators:
#
- 8 - 6 - 4 - 2 - 0 -
| | | | |
- 9 - 7 - 5 - 3 - 1 -
""" | bsd-3-clause |
aewhatley/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
joergkappes/opengm | src/interfaces/python/examples/visu/full.py | 14 | 1157 | import numpy
import opengm
import matplotlib.pyplot as plt
f1=numpy.ones([2])
f2=numpy.ones([2,2])
"""
Fully connected (non-shared):
- all possible pairwise connections
- functions are *non*-shared
"""
numVar=4
gm=opengm.gm([2]*numVar)
for vi0 in xrange(numVar):
for vi1 in xrange(vi0+1,numVar):
gm.addFactor(gm.addFunction(f2),[vi0,vi1])
opengm.visualizeGm( gm,show=False,layout='neato',
iterations=1000,plotFunctions=True,
plotNonShared=True,relNodeSize=0.4)
plt.savefig("full_non_shared.png",bbox_inches='tight',dpi=300)
plt.close()
"""
Fully connected (shared):
- 4 variables
- 6 second-order factors
(all possible pairwise connections)
- the pairwise function is shared
"""
numVar=4
gm=opengm.gm([2]*numVar)
fid2=gm.addFunction(f2)
for vi0 in xrange(numVar):
for vi1 in xrange(vi0+1,numVar):
gm.addFactor(fid2,[vi0,vi1])
opengm.visualizeGm( gm,show=False,layout='neato',
iterations=1000,plotFunctions=True,
plotNonShared=True,relNodeSize=0.4)
plt.savefig("full_shared.png",bbox_inches='tight',dpi=300)
plt.close() | mit |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/do/modeling/seba/plotDataset.py | 1 | 10887 | import aplpy
import pyparsing
import pyregion
import pyfits
import matplotlib.pyplot as pyplot
import matplotlib as mpl
path = 'FinalRun/'
fig = pyplot.figure(figsize=(9,10))
fig.text(0.385,0.97,"Offset from centre (degrees)",color='black',size='16',weight='bold')
fig.text(0.02,0.615,"Offset from centre (degrees)",color='black',size='16',weight='bold',rotation='vertical')
def standard_setup(sp):
sp.set_frame_color('black')
sp.set_tick_labels_font(size='10')
sp.set_axis_labels_font(size='12')
#sp.set_tick_labels_format(xformat='hh:mm',yformat='dd:mm')
sp.set_xaxis_coord_type('scalar')
sp.set_yaxis_coord_type('scalar')
sp.set_tick_color('black')
sp.recenter(x=0.0, y=0.0,width=3.,height=0.6)
sp.set_tick_xspacing(0.4)
sp.set_tick_yspacing(0.25)
sp.set_system_latex(True)
sp.tick_labels.hide()
sp.axis_labels.hide()
plotloc = [[0.09,0.825,0.3,0.115],[0.39,0.825,0.3,0.115],[0.69,0.825,0.3,0.115],
[0.09,0.710,0.3,0.115],[0.39,0.710,0.3,0.115],[0.69,0.710,0.3,0.115],
[0.09,0.595,0.3,0.115],[0.39,0.595,0.3,0.115],[0.69,0.595,0.3,0.115],
[0.09,0.480,0.3,0.115],[0.39,0.480,0.3,0.115],[0.69,0.480,0.3,0.115],
[0.09,0.365,0.3,0.115],[0.39,0.365,0.3,0.115],[0.69,0.365,0.3,0.115],
[0.09,0.250,0.3,0.115],[0.39,0.250,0.3,0.115],[0.69,0.250,0.3,0.115],
[0.09,0.135,0.3,0.115],[0.39,0.135,0.3,0.115],[0.69,0.135,0.3,0.115],
[0.09,0.020,0.3,0.115]]
# First row
f1 = aplpy.FITSFigure(path+'maps/plotimFUVJy.fits', figure=fig, subplot=plotloc[0])
standard_setup(f1)
#f1.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f1.show_colorscale(vmin=0, vmax=0.00025, cmap='hot')
f1.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
#f1.axis_labels.show_y()
f1.tick_labels.set_xposition('top')
f1.tick_labels.show()
f2 = aplpy.FITSFigure(path+'maps/plotimNUVJy.fits', figure=fig, subplot=plotloc[1])
standard_setup(f2)
#f2.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f2.show_colorscale(vmin=0, vmax=0.0004, cmap='hot')
f2.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
f2.tick_labels.set_xposition('top')
f2.tick_labels.show_x()
f3 = aplpy.FITSFigure(path+'maps/plotimuJy.fits', figure=fig, subplot=plotloc[2])
standard_setup(f3)
#f3.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f3.show_colorscale(vmin=0, vmax=0.004, cmap='hot')
f3.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
f3.tick_labels.set_xposition('top')
f3.tick_labels.show_x()
# Next rows
f4 = aplpy.FITSFigure(path+'maps/plotimgJy.fits', figure=fig, subplot=plotloc[3])
standard_setup(f4)
#f4.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f4.show_colorscale(vmin=0, vmax=0.015, cmap='hot')
f4.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
#f4.axis_labels.show_y()
f4.tick_labels.show_y()
f5 = aplpy.FITSFigure(path+'maps/plotimrJy.fits', figure=fig, subplot=plotloc[4])
standard_setup(f5)
#f5.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f5.show_colorscale(vmin=0, vmax=0.045, cmap='hot')
f5.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
f6 = aplpy.FITSFigure(path+'maps/plotimiJy.fits', figure=fig, subplot=plotloc[5])
standard_setup(f6)
#f6.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f6.show_colorscale(vmin=0, vmax=0.05, cmap='hot')
f6.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
f7 = aplpy.FITSFigure(path+'maps/plotimzJy.fits', figure=fig, subplot=plotloc[6])
standard_setup(f7)
#f7.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f7.show_colorscale(vmin=0, vmax=0.07, cmap='hot')
f7.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
#f7.axis_labels.show_y()
f7.tick_labels.show_y()
f8 = aplpy.FITSFigure(path+'maps/plotimW1Jy.fits', figure=fig, subplot=plotloc[7])
standard_setup(f8)
#f8.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f8.show_colorscale(vmin=0, vmax=0.075, cmap='hot')
f8.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
f9 = aplpy.FITSFigure(path+'maps/plotim3.6Jy.fits', figure=fig, subplot=plotloc[8])
standard_setup(f9)
#f9.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f9.show_colorscale(vmin=0, vmax=0.075, cmap='hot')
f9.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
f10 = aplpy.FITSFigure(path+'maps/plotim4.5Jy.fits', figure=fig, subplot=plotloc[9])
standard_setup(f10)
#f10.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f10.show_colorscale(vmin=0, vmax=0.055, cmap='hot')
f10.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
#f10.axis_labels.show_y()
f10.tick_labels.show_y()
f11 = aplpy.FITSFigure(path+'maps/plotimW2Jy.fits', figure=fig, subplot=plotloc[10])
standard_setup(f11)
#f11.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f11.show_colorscale(vmin=0, vmax=0.055, cmap='hot')
f11.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
f12 = aplpy.FITSFigure(path+'maps/plotim5.8Jy.fits', figure=fig, subplot=plotloc[11])
standard_setup(f12)
#f12.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f12.show_colorscale(vmin=0, vmax=0.075, cmap='hot')
f12.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
f13 = aplpy.FITSFigure(path+'maps/plotim8Jy.fits', figure=fig, subplot=plotloc[12])
standard_setup(f13)
#f13.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f13.show_colorscale(vmin=0, vmax=0.08, cmap='hot')
f13.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
#f13.axis_labels.show_y()
f13.tick_labels.show_y()
f14 = aplpy.FITSFigure(path+'maps/plotimW3Jy.fits', figure=fig, subplot=plotloc[13])
standard_setup(f14)
#f14.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f14.show_colorscale(vmin=0, vmax=0.06, cmap='hot')
f14.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
f15 = aplpy.FITSFigure(path+'maps/plotimW4Jy.fits', figure=fig, subplot=plotloc[14])
standard_setup(f15)
#f15.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f15.show_colorscale(vmin=0, vmax=0.05, cmap='hot')
f15.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
f16 = aplpy.FITSFigure(path+'maps/plotim24Jy.fits', figure=fig, subplot=plotloc[15])
standard_setup(f16)
#f16.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f16.show_colorscale(vmin=0, vmax=0.035, cmap='hot')
f16.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
#f16.axis_labels.show_y()
f16.tick_labels.show_y()
f17 = aplpy.FITSFigure(path+'maps/plotim70Jy.fits', figure=fig, subplot=plotloc[16])
standard_setup(f17)
#f17.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f17.show_colorscale(vmin=0, vmax=0.35, cmap='hot')
f17.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
f18 = aplpy.FITSFigure(path+'maps/plotim100Jy.fits', figure=fig, subplot=plotloc[17])
standard_setup(f18)
#f18.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f18.show_colorscale(vmin=0, vmax=0.7, cmap='hot')
f18.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
f19 = aplpy.FITSFigure(path+'maps/plotim160Jy.fits', figure=fig, subplot=plotloc[18])
standard_setup(f19)
#f19.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f19.show_colorscale(vmin=0, vmax=1.3, cmap='hot')
f19.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
#f19.axis_labels.show_y()
f19.tick_labels.show_y()
f20 = aplpy.FITSFigure(path+'maps/plotim250Jy.fits', figure=fig, subplot=plotloc[19])
standard_setup(f20)
#f20.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f20.show_colorscale(vmin=0, vmax=1.3, cmap='hot')
f20.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
#f20.axis_labels.show_x()
f20.tick_labels.show_x()
f21 = aplpy.FITSFigure(path+'maps/plotim350Jy.fits', figure=fig, subplot=plotloc[20])
standard_setup(f21)
#f21.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f21.show_colorscale(vmin=0, vmax=0.6, cmap='hot')
f21.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
#f21.axis_labels.show_x()
f21.tick_labels.show_x()
f22 = aplpy.FITSFigure(path+'maps/plotim500Jy.fits', figure=fig, subplot=plotloc[21])
standard_setup(f22)
#f22.show_colorscale(pmax=99.25, pmin=0.50, cmap='hot')
f22.show_colorscale(vmin=0, vmax=0.3, cmap='hot')
f22.show_beam(major=0.01, minor=0.01, angle=0,fill=True,color='white')
#f22.axis_labels.show()
f22.tick_labels.show()
# Add a colourbar
axisf3 = fig.add_axes([0.45,0.07,0.5,0.02])
cmapf3 = mpl.cm.hot
normf3 = mpl.colors.Normalize(vmin=0, vmax=1)
cbf3 = mpl.colorbar.ColorbarBase(axisf3, cmap=cmapf3, norm=normf3, orientation='horizontal')
cbf3.set_label('Flux (arbitrary units)')
# Add labels
fig.text(0.38,0.915,"FUV",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.68,0.915,"NUV",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.98,0.915,"u",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.38,0.8,"g",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.68,0.8,"r",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.98,0.8,"i",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.38,0.685,"z",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.68,0.685,"W1",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.98,0.685,"3.6",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.38,0.57,"4.5",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.68,0.57,"W2",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.98,0.57,"5.8",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.38,0.455,"8",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.68,0.455,"W3",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.98,0.455,"W4",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.38,0.34,"24",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.68,0.34,"70",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.98,0.34,"100",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.38,0.225,"160",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.68,0.225,"250",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.98,0.225,"350",color='white',size='14',weight='bold', horizontalalignment='right')
fig.text(0.38,0.11,"500",color='white',size='14',weight='bold', horizontalalignment='right')
fig.patch.set_facecolor('#3f3f3f')
fig.canvas.draw()
#fig.savefig("ngc891_figure.eps", dpi=300)
fig.savefig(path+"dataset.eps", dpi=600)
#fig.savefig("ngc891_figure.png", dpi=300)
pyplot.show() | mit |
rupakc/Kaggle-Compendium | Santander Product Recommendation/santander-baseline.py | 1 | 3394 | import pandas as pd
from sklearn.multiclass import OutputCodeClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn.ensemble import RandomForestClassifier
import numpy as np
def get_multioutput_classifier(classifier):
multi = MultiOutputClassifier(estimator=classifier)
return [multi], ["Multioutput Classifier"]
def get_multiclass_classifier(base_estimator):
output_code = OutputCodeClassifier(base_estimator,random_state=42)
one_vs_one = OneVsOneClassifier(base_estimator)
one_vs_all = OneVsRestClassifier(base_estimator)
return [output_code,one_vs_one,one_vs_all], ['Output Code','One Vs One','One Vs All']
def label_encode_frame(dataframe):
columns = dataframe.columns
encoder = LabelEncoder()
for column in columns:
        if pd.isnull(dataframe[column][0]):  # first value is missing; scan below for string entries
for i in range(len(dataframe)):
if i > 1000:
break
if type(dataframe[column][i]) is str:
dataframe[column] = encoder.fit_transform(dataframe[column].values)
break
elif type(dataframe[column][0]) is str:
dataframe[column] = encoder.fit_transform(dataframe[column].values)
return dataframe
def split_date(list_of_date_string,separator='-',format='yyyy-mm-dd'):
month_list = list([])
day_list = list([])
year_list = list([])
for date_string in list_of_date_string:
date_list = str(date_string).strip().split(separator)
month_list.append(date_list[1])
day_list.append(date_list[2])
year_list.append(date_list[0])
return month_list,day_list,year_list
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
print '--------- For Model : ', trained_model_name, ' ---------------\n'
predicted_values = trained_model.predict(X_test)
print predicted_values[0]
print '---------'
print y_test[0]
print metrics.classification_report(y_test,predicted_values)
print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
print "---------------------------------------\n"
reco_frame = pd.read_csv('train_ver2.csv')
reco_value_frame = reco_frame[list(reco_frame.columns[25:49])]
columns_reco = list(range(21,44))
impute = Imputer()
reco_frame.drop(reco_frame.columns[[5,8,11,15]],axis=1,inplace=True)
reco_frame.drop(reco_frame.columns[columns_reco],axis=1,inplace=True)
del reco_frame['fecha_dato']
del reco_frame['fecha_alta']
encoded_frame = label_encode_frame(reco_frame)
encoded_frame = encoded_frame.head(1000)
imputed_values = impute.fit_transform(encoded_frame.values)
rf = RandomForestClassifier(n_estimators=101,min_samples_split=5,min_samples_leaf=7,random_state=42)
X_train,X_test,y_train,y_test = train_test_split(imputed_values,reco_value_frame.values,test_size=0.2,random_state=42)
classifier_list, classifier_name_list = get_multiclass_classifier(rf)
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
classifier.fit(X_train,y_train)
print_evaluation_metrics(classifier,classifier_name,X_test,y_test)
| mit |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Padova_cont_fullelines/MoreLines2.py | 3 | 7229 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [112,113,114,115,116,117,118,119,120,121,122,123,124,125,126]
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("More Lines 2", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('MoreLines2.pdf')
plt.clf()
| gpl-2.0 |
ycaihua/scikit-learn | sklearn/manifold/locally_linear.py | 21 | 24928 | """Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <[email protected]>
# Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
    reg : float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
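    Examples
    --------
    A small illustrative check (synthetic points chosen here for
    documentation; not part of the original docstring). The weights have
    shape (n_samples, n_neighbors) and each row sums to one:
    >>> import numpy as np
    >>> X = np.array([[0., 0.], [1., 1.], [2., 2.]])
    >>> ind = np.array([[1, 2], [0, 2], [0, 1]])  # 2 neighbors per point
    >>> W = barycenter_weights(X, X[ind])
    >>> W.shape
    (3, 2)
    >>> bool(np.allclose(W.sum(axis=1), 1.0))
    True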
"""
X = np.asarray(X)
Z = np.asarray(Z)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
if X.dtype.kind == 'i':
X = X.astype(np.float)
if Z.dtype.kind == 'i':
Z = Z.astype(np.float)
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
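    Examples
    --------
    A small illustrative call (synthetic points chosen here for
    documentation; not part of the original docstring):
    >>> import numpy as np
    >>> X = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
    >>> A = barycenter_kneighbors_graph(X, n_neighbors=2)
    >>> A.shape
    (4, 4)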
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
    max_iter : int, optional
        Maximum number of iterations for the 'arpack' method.
        Not used if eigen_solver=='dense'.
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
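    Examples
    --------
    A small illustrative call on a dense symmetric matrix (synthetic input
    chosen here for documentation; not part of the original docstring):
    >>> import numpy as np
    >>> M = np.diag([0., 0., 1., 2.])
    >>> vectors, error = null_space(M, k=1, k_skip=1, eigen_solver='dense')
    >>> vectors.shape
    (4, 1)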
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (n_components + 3) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # add I: M = (I - W)' (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
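# Editor's note: the `_demo_locally_linear_embedding` helper below is an
# illustrative sketch added for clarity; it is not part of the original
# scikit-learn module. It runs the functional LLE interface on a small
# random 3-d point cloud and reduces it to two components.
def _demo_locally_linear_embedding():  # pragma: no cover
    rng = np.random.RandomState(42)
    X = rng.rand(50, 3)
    Y, err = locally_linear_embedding(X, n_neighbors=10, n_components=2,
                                      eigen_solver='dense')
    assert Y.shape == (50, 2)
    return Y, err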
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
        ``n_neighbors > n_components * (n_components + 3) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
    embedding_ : array-like, shape [n_samples, n_components]
        Stores the embedding vectors
    reconstruction_error_ : float
        Reconstruction error associated with `embedding_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
        Because of the scaling performed by this method, using it together
        with methods that are not scale-invariant (like SVMs) is discouraged.
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
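# Editor's note: the `_demo_locally_linear_embedding_estimator` helper below
# is an illustrative sketch added for clarity; it is not part of the original
# scikit-learn module. It fits the estimator and then maps previously unseen
# points into the learned embedding space via `transform`.
def _demo_locally_linear_embedding_estimator():  # pragma: no cover
    rng = np.random.RandomState(0)
    X_train = rng.rand(60, 3)
    X_new = rng.rand(5, 3)
    lle = LocallyLinearEmbedding(n_neighbors=8, n_components=2,
                                 eigen_solver='dense')
    embedding = lle.fit_transform(X_train)   # shape (60, 2)
    projected = lle.transform(X_new)         # shape (5, 2)
    return embedding, projected, lle.reconstruction_error_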
| bsd-3-clause |
jseabold/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
bnaul/scikit-learn | sklearn/preprocessing/_discretization.py | 3 | 13192 | # -*- coding: utf-8 -*-
# Author: Henry Lin <[email protected]>
# Tom Dupré la Tour
# License: BSD
import numbers
import numpy as np
import warnings
from . import OneHotEncoder
from ..base import BaseEstimator, TransformerMixin
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _deprecate_positional_args
class KBinsDiscretizer(TransformerMixin, BaseEstimator):
"""
Bin continuous data into intervals.
Read more in the :ref:`User Guide <preprocessing_discretization>`.
.. versionadded:: 0.20
Parameters
----------
n_bins : int or array-like, shape (n_features,) (default=5)
The number of bins to produce. Raises ValueError if ``n_bins < 2``.
encode : {'onehot', 'onehot-dense', 'ordinal'}, (default='onehot')
Method used to encode the transformed result.
onehot
Encode the transformed result with one-hot encoding
and return a sparse matrix. Ignored features are always
stacked to the right.
onehot-dense
Encode the transformed result with one-hot encoding
and return a dense array. Ignored features are always
stacked to the right.
ordinal
Return the bin identifier encoded as an integer value.
strategy : {'uniform', 'quantile', 'kmeans'}, (default='quantile')
Strategy used to define the widths of the bins.
uniform
All bins in each feature have identical widths.
quantile
All bins in each feature have the same number of points.
kmeans
Values in each bin have the same nearest center of a 1D k-means
cluster.
dtype : {np.float32, np.float64}, default=None
The desired data-type for the output. If None, output dtype is
consistent with input dtype. Only np.float32 and np.float64 are
supported.
Attributes
----------
n_bins_ : int array, shape (n_features,)
        Number of bins per feature. Bins whose width is too small
        (i.e., <= 1e-8) are removed with a warning.
bin_edges_ : array of arrays, shape (n_features, )
The edges of each bin. Contain arrays of varying shapes ``(n_bins_, )``
Ignored features will have empty arrays.
See Also
--------
sklearn.preprocessing.Binarizer : Class used to bin values as ``0`` or
``1`` based on a parameter ``threshold``.
Notes
-----
In bin edges for feature ``i``, the first and last values are used only for
``inverse_transform``. During transform, bin edges are extended to::
np.concatenate([-np.inf, bin_edges_[i][1:-1], np.inf])
You can combine ``KBinsDiscretizer`` with
:class:`~sklearn.compose.ColumnTransformer` if you only want to preprocess
part of the features.
``KBinsDiscretizer`` might produce constant features (e.g., when
``encode = 'onehot'`` and certain bins do not contain any data).
These features can be removed with feature selection algorithms
(e.g., :class:`~sklearn.feature_selection.VarianceThreshold`).
Examples
--------
>>> X = [[-2, 1, -4, -1],
... [-1, 2, -3, -0.5],
... [ 0, 3, -2, 0.5],
... [ 1, 4, -1, 2]]
>>> est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')
>>> est.fit(X)
KBinsDiscretizer(...)
>>> Xt = est.transform(X)
>>> Xt # doctest: +SKIP
array([[ 0., 0., 0., 0.],
[ 1., 1., 1., 0.],
[ 2., 2., 2., 1.],
[ 2., 2., 2., 2.]])
Sometimes it may be useful to convert the data back into the original
feature space. The ``inverse_transform`` function converts the binned
data into the original feature space. Each value will be equal to the mean
of the two bin edges.
>>> est.bin_edges_[0]
array([-2., -1., 0., 1.])
>>> est.inverse_transform(Xt)
array([[-1.5, 1.5, -3.5, -0.5],
[-0.5, 2.5, -2.5, -0.5],
[ 0.5, 3.5, -1.5, 0.5],
[ 0.5, 3.5, -1.5, 1.5]])
"""
@_deprecate_positional_args
def __init__(self, n_bins=5, encode='onehot', strategy='quantile',
dtype=None):
self.n_bins = n_bins
self.encode = encode
self.strategy = strategy
self.dtype = dtype
def fit(self, X, y=None):
"""
Fit the estimator.
Parameters
----------
X : numeric array-like, shape (n_samples, n_features)
Data to be discretized.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
Returns
-------
self
"""
X = self._validate_data(X, dtype='numeric')
supported_dtype = (np.float64, np.float32)
if self.dtype in supported_dtype:
output_dtype = self.dtype
elif self.dtype is None:
output_dtype = X.dtype
else:
raise ValueError(
f"Valid options for 'dtype' are "
f"{supported_dtype + (None,)}. Got dtype={self.dtype} "
f" instead."
)
valid_encode = ('onehot', 'onehot-dense', 'ordinal')
if self.encode not in valid_encode:
raise ValueError("Valid options for 'encode' are {}. "
"Got encode={!r} instead."
.format(valid_encode, self.encode))
valid_strategy = ('uniform', 'quantile', 'kmeans')
if self.strategy not in valid_strategy:
raise ValueError("Valid options for 'strategy' are {}. "
"Got strategy={!r} instead."
.format(valid_strategy, self.strategy))
n_features = X.shape[1]
n_bins = self._validate_n_bins(n_features)
bin_edges = np.zeros(n_features, dtype=object)
for jj in range(n_features):
column = X[:, jj]
col_min, col_max = column.min(), column.max()
if col_min == col_max:
warnings.warn("Feature %d is constant and will be "
"replaced with 0." % jj)
n_bins[jj] = 1
bin_edges[jj] = np.array([-np.inf, np.inf])
continue
if self.strategy == 'uniform':
bin_edges[jj] = np.linspace(col_min, col_max, n_bins[jj] + 1)
elif self.strategy == 'quantile':
quantiles = np.linspace(0, 100, n_bins[jj] + 1)
bin_edges[jj] = np.asarray(np.percentile(column, quantiles))
elif self.strategy == 'kmeans':
from ..cluster import KMeans # fixes import loops
# Deterministic initialization with uniform spacing
uniform_edges = np.linspace(col_min, col_max, n_bins[jj] + 1)
init = (uniform_edges[1:] + uniform_edges[:-1])[:, None] * 0.5
# 1D k-means procedure
km = KMeans(n_clusters=n_bins[jj], init=init, n_init=1)
centers = km.fit(column[:, None]).cluster_centers_[:, 0]
# Must sort, centers may be unsorted even with sorted init
centers.sort()
bin_edges[jj] = (centers[1:] + centers[:-1]) * 0.5
bin_edges[jj] = np.r_[col_min, bin_edges[jj], col_max]
# Remove bins whose width are too small (i.e., <= 1e-8)
if self.strategy in ('quantile', 'kmeans'):
mask = np.ediff1d(bin_edges[jj], to_begin=np.inf) > 1e-8
bin_edges[jj] = bin_edges[jj][mask]
if len(bin_edges[jj]) - 1 != n_bins[jj]:
warnings.warn('Bins whose width are too small (i.e., <= '
'1e-8) in feature %d are removed. Consider '
'decreasing the number of bins.' % jj)
n_bins[jj] = len(bin_edges[jj]) - 1
self.bin_edges_ = bin_edges
self.n_bins_ = n_bins
if 'onehot' in self.encode:
self._encoder = OneHotEncoder(
categories=[np.arange(i) for i in self.n_bins_],
sparse=self.encode == 'onehot',
dtype=output_dtype)
# Fit the OneHotEncoder with toy datasets
# so that it's ready for use after the KBinsDiscretizer is fitted
self._encoder.fit(np.zeros((1, len(self.n_bins_))))
return self
def _validate_n_bins(self, n_features):
"""Returns n_bins_, the number of bins per feature.
"""
orig_bins = self.n_bins
if isinstance(orig_bins, numbers.Number):
if not isinstance(orig_bins, numbers.Integral):
raise ValueError("{} received an invalid n_bins type. "
"Received {}, expected int."
.format(KBinsDiscretizer.__name__,
type(orig_bins).__name__))
if orig_bins < 2:
raise ValueError("{} received an invalid number "
"of bins. Received {}, expected at least 2."
.format(KBinsDiscretizer.__name__, orig_bins))
return np.full(n_features, orig_bins, dtype=int)
n_bins = check_array(orig_bins, dtype=int, copy=True,
ensure_2d=False)
if n_bins.ndim > 1 or n_bins.shape[0] != n_features:
raise ValueError("n_bins must be a scalar or array "
"of shape (n_features,).")
bad_nbins_value = (n_bins < 2) | (n_bins != orig_bins)
violating_indices = np.where(bad_nbins_value)[0]
if violating_indices.shape[0] > 0:
indices = ", ".join(str(i) for i in violating_indices)
raise ValueError("{} received an invalid number "
"of bins at indices {}. Number of bins "
"must be at least 2, and must be an int."
.format(KBinsDiscretizer.__name__, indices))
return n_bins
def transform(self, X):
"""
Discretize the data.
Parameters
----------
X : numeric array-like, shape (n_samples, n_features)
Data to be discretized.
Returns
-------
Xt : numeric array-like or sparse matrix
Data in the binned space.
"""
check_is_fitted(self)
# check input and attribute dtypes
dtype = (np.float64, np.float32) if self.dtype is None else self.dtype
Xt = check_array(X, copy=True, dtype=dtype)
n_features = self.n_bins_.shape[0]
if Xt.shape[1] != n_features:
raise ValueError("Incorrect number of features. Expecting {}, "
"received {}.".format(n_features, Xt.shape[1]))
bin_edges = self.bin_edges_
for jj in range(Xt.shape[1]):
# Values which are close to a bin edge are susceptible to numeric
# instability. Add eps to X so these values are binned correctly
# with respect to their decimal truncation. See documentation of
# numpy.isclose for an explanation of ``rtol`` and ``atol``.
rtol = 1.e-5
atol = 1.e-8
eps = atol + rtol * np.abs(Xt[:, jj])
Xt[:, jj] = np.digitize(Xt[:, jj] + eps, bin_edges[jj][1:])
np.clip(Xt, 0, self.n_bins_ - 1, out=Xt)
if self.encode == 'ordinal':
return Xt
dtype_init = None
if 'onehot' in self.encode:
dtype_init = self._encoder.dtype
self._encoder.dtype = Xt.dtype
try:
Xt_enc = self._encoder.transform(Xt)
finally:
# revert the initial dtype to avoid modifying self.
self._encoder.dtype = dtype_init
return Xt_enc
def inverse_transform(self, Xt):
"""
Transform discretized data back to original feature space.
Note that this function does not regenerate the original data
due to discretization rounding.
Parameters
----------
Xt : numeric array-like, shape (n_sample, n_features)
Transformed data in the binned space.
Returns
-------
Xinv : numeric array-like
Data in the original feature space.
"""
check_is_fitted(self)
if 'onehot' in self.encode:
Xt = self._encoder.inverse_transform(Xt)
Xinv = check_array(Xt, copy=True, dtype=(np.float64, np.float32))
n_features = self.n_bins_.shape[0]
if Xinv.shape[1] != n_features:
raise ValueError("Incorrect number of features. Expecting {}, "
"received {}.".format(n_features, Xinv.shape[1]))
for jj in range(n_features):
bin_edges = self.bin_edges_[jj]
bin_centers = (bin_edges[1:] + bin_edges[:-1]) * 0.5
Xinv[:, jj] = bin_centers[np.int_(Xinv[:, jj])]
return Xinv
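# Editor's note: the `_demo_kbins_strategies` helper below is an illustrative
# sketch added for clarity; it is not part of the original scikit-learn
# module. It contrasts the 'uniform' and 'quantile' strategies on skewed data
# and shows that inverse_transform returns bin centers, not the exact input.
def _demo_kbins_strategies():  # pragma: no cover
    rng = np.random.RandomState(0)
    X = rng.exponential(size=(100, 1))   # skewed, single feature
    uniform = KBinsDiscretizer(n_bins=4, encode='ordinal',
                               strategy='uniform').fit(X)
    quantile = KBinsDiscretizer(n_bins=4, encode='ordinal',
                                strategy='quantile').fit(X)
    # Uniform bins share one width; quantile bins hold ~25 samples each.
    Xt = quantile.transform(X)
    X_back = quantile.inverse_transform(Xt)
    return uniform.bin_edges_[0], quantile.bin_edges_[0], X_back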
| bsd-3-clause |
rohanp/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
mmottahedi/nilmtk | nilmtk/tests/test_combinatorial_optimisation.py | 5 | 1396 | #!/usr/bin/python
from __future__ import print_function, division
import unittest
from os.path import join
from os import remove
import pandas as pd
from datetime import timedelta
from testingtools import data_dir
from nilmtk.datastore import HDFDataStore
from nilmtk import DataSet, TimeFrame
from nilmtk.disaggregate import CombinatorialOptimisation
class TestCO(unittest.TestCase):
@classmethod
def setUpClass(cls):
filename = join(data_dir(), 'co_test.h5')
cls.dataset = DataSet(filename)
@classmethod
def tearDownClass(cls):
cls.dataset.store.close()
def test_co_correctness(self):
elec = self.dataset.buildings[1].elec
co = CombinatorialOptimisation()
co.train(elec)
mains = elec.mains()
output = HDFDataStore('output.h5', 'w')
co.disaggregate(mains, output, resample_seconds=1)
for meter in range(2, 4):
df1 = output.store.get('/building1/elec/meter{}'.format(meter))
df2 = self.dataset.store.store.get(
'/building1/elec/meter{}'.format(meter))
self.assertEqual((df1 == df2).sum().values[0], len(df1.index))
self.assertEqual(len(df1.index), len(df2.index))
output.close()
remove("output.h5")
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
nolanliou/tensorflow | tensorflow/contrib/distributions/python/ops/mixture.py | 8 | 20696 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util as distribution_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class Mixture(distribution.Distribution):
"""Mixture distribution.
The `Mixture` object implements batched mixture distributions.
The mixture model is defined by a `Categorical` distribution (the mixture)
and a python list of `Distribution` objects.
Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
`entropy_lower_bound`.
#### Examples
```python
# Create a mixture of two Gaussians:
tfd = tf.contrib.distributions
mix = 0.3
bimix_gauss = tfd.Mixture(
cat=tfd.Categorical(probs=[mix, 1.-mix]),
components=[
tfd.Normal(loc=-1., scale=0.1),
tfd.Normal(loc=+1., scale=0.5),
])
# Plot the PDF.
import matplotlib.pyplot as plt
x = tf.linspace(-2., 3., int(1e4)).eval()
plt.plot(x, bimix_gauss.prob(x).eval());
```
"""
def __init__(self,
cat,
components,
validate_args=False,
allow_nan_stats=True,
use_static_graph=False,
name="Mixture"):
"""Initialize a Mixture distribution.
A `Mixture` is defined by a `Categorical` (`cat`, representing the
mixture probabilities) and a list of `Distribution` objects
all having matching dtype, batch shape, event shape, and continuity
properties (the components).
The `num_classes` of `cat` must be possible to infer at graph construction
time and match `len(components)`.
Args:
cat: A `Categorical` distribution instance, representing the probabilities
          of `components`.
components: A list or tuple of `Distribution` instances.
Each instance must have the same type, be defined on the same domain,
and have matching `event_shape` and `batch_shape`.
validate_args: Python `bool`, default `False`. If `True`, raise a runtime
error if batch or event ranks are inconsistent between cat and any of
the distributions. This is only checked if the ranks cannot be
determined statically at graph construction time.
allow_nan_stats: Boolean, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
use_static_graph: Calls to `sample` will not rely on dynamic tensor
indexing, allowing for some static graph compilation optimizations, but
at the expense of sampling all underlying distributions in the mixture.
(Possibly useful when running on TPUs).
Default value: `False` (i.e., use dynamic indexing).
name: A name for this distribution (optional).
Raises:
TypeError: If cat is not a `Categorical`, or `components` is not
a list or tuple, or the elements of `components` are not
instances of `Distribution`, or do not have matching `dtype`.
ValueError: If `components` is an empty list or tuple, or its
elements do not have a statically known event rank.
If `cat.num_classes` cannot be inferred at graph creation time,
or the constant value of `cat.num_classes` is not equal to
`len(components)`, or all `components` and `cat` do not have
matching static batch shapes, or all components do not
have matching static event shapes.
"""
parameters = locals()
if not isinstance(cat, categorical.Categorical):
raise TypeError("cat must be a Categorical distribution, but saw: %s" %
cat)
if not components:
raise ValueError("components must be a non-empty list or tuple")
if not isinstance(components, (list, tuple)):
raise TypeError("components must be a list or tuple, but saw: %s" %
components)
if not all(isinstance(c, distribution.Distribution) for c in components):
raise TypeError(
"all entries in components must be Distribution instances"
" but saw: %s" % components)
dtype = components[0].dtype
if not all(d.dtype == dtype for d in components):
raise TypeError("All components must have the same dtype, but saw "
"dtypes: %s" % [(d.name, d.dtype) for d in components])
static_event_shape = components[0].event_shape
static_batch_shape = cat.batch_shape
for d in components:
static_event_shape = static_event_shape.merge_with(d.event_shape)
static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
if static_event_shape.ndims is None:
raise ValueError(
"Expected to know rank(event_shape) from components, but "
"none of the components provide a static number of ndims")
# Ensure that all batch and event ndims are consistent.
with ops.name_scope(name, values=[cat.logits]):
num_components = cat.event_size
static_num_components = tensor_util.constant_value(num_components)
if static_num_components is None:
raise ValueError(
"Could not infer number of classes from cat and unable "
"to compare this value to the number of components passed in.")
# Possibly convert from numpy 0-D array.
static_num_components = int(static_num_components)
if static_num_components != len(components):
raise ValueError("cat.num_classes != len(components): %d vs. %d" %
(static_num_components, len(components)))
cat_batch_shape = cat.batch_shape_tensor()
cat_batch_rank = array_ops.size(cat_batch_shape)
if validate_args:
batch_shapes = [d.batch_shape_tensor() for d in components]
batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
check_message = ("components[%d] batch shape must match cat "
"batch shape")
self._assertions = [
check_ops.assert_equal(
cat_batch_rank, batch_ranks[di], message=check_message % di)
for di in range(len(components))
]
self._assertions += [
check_ops.assert_equal(
cat_batch_shape, batch_shapes[di], message=check_message % di)
for di in range(len(components))
]
else:
self._assertions = []
self._cat = cat
self._components = list(components)
self._num_components = static_num_components
self._static_event_shape = static_event_shape
self._static_batch_shape = static_batch_shape
self._use_static_graph = use_static_graph
if use_static_graph and static_num_components is None:
raise ValueError("Number of categories must be known statically when "
"`static_sample=True`.")
# We let the Mixture distribution access _graph_parents since its arguably
# more like a baseclass.
graph_parents = self._cat._graph_parents # pylint: disable=protected-access
for c in self._components:
graph_parents += c._graph_parents # pylint: disable=protected-access
super(Mixture, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=graph_parents,
name=name)
@property
def cat(self):
return self._cat
@property
def components(self):
return self._components
@property
def num_components(self):
return self._num_components
def _batch_shape_tensor(self):
return self._cat.batch_shape_tensor()
def _batch_shape(self):
return self._static_batch_shape
def _event_shape_tensor(self):
return self._components[0].event_shape_tensor()
def _event_shape(self):
return self._static_event_shape
def _expand_to_event_rank(self, x):
"""Expand the rank of x up to static_event_rank times for broadcasting.
The static event rank was checked to not be None at construction time.
Args:
x: A tensor to expand.
Returns:
The expanded tensor.
"""
expanded_x = x
for _ in range(self.event_shape.ndims):
expanded_x = array_ops.expand_dims(expanded_x, -1)
return expanded_x
def _mean(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
partial_means = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
]
# These should all be the same shape by virtue of matching
# batch_shape and event_shape.
return math_ops.add_n(partial_means)
def _stddev(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
distribution_devs = [d.stddev() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
stacked_means = array_ops.stack(distribution_means, axis=-1)
stacked_devs = array_ops.stack(distribution_devs, axis=-1)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
broadcasted_cat_probs = (array_ops.stack(cat_probs, axis=-1) *
array_ops.ones_like(stacked_means))
batched_dev = distribution_utils.mixture_stddev(
array_ops.reshape(broadcasted_cat_probs, [-1, len(self.components)]),
array_ops.reshape(stacked_means, [-1, len(self.components)]),
array_ops.reshape(stacked_devs, [-1, len(self.components)]))
# I.e. re-shape to list(batch_shape) + list(event_shape).
return array_ops.reshape(batched_dev,
array_ops.shape(broadcasted_cat_probs)[:-1])
def _log_prob(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_probs = [d.log_prob(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_probs = [
cat_lp + d_lp
for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
]
concat_log_probs = array_ops.stack(final_log_probs, 0)
log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
return log_sum_exp
def _log_cdf(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_cdfs = [d.log_cdf(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_cdfs = [
cat_lp + d_lcdf
for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs)
]
concatted_log_cdfs = array_ops.stack(final_log_cdfs, axis=0)
mixture_log_cdf = math_ops.reduce_logsumexp(concatted_log_cdfs, [0])
return mixture_log_cdf
def _sample_n(self, n, seed=None):
if self._use_static_graph:
# This sampling approach is almost the same as the approach used by
# `MixtureSameFamily`. The differences are due to having a list of
# `Distribution` objects rather than a single object, and maintaining
# random seed management that is consistent with the non-static code path.
samples = []
cat_samples = self.cat.sample(n, seed=seed)
for c in range(self.num_components):
seed = distribution_util.gen_new_seed(seed, "mixture")
samples.append(self.components[c].sample(n, seed=seed))
x = array_ops.stack(
samples, -self._static_event_shape.ndims - 1) # [n, B, k, E]
npdt = x.dtype.as_numpy_dtype
mask = array_ops.one_hot(
indices=cat_samples, # [n, B]
depth=self._num_components, # == k
on_value=np.ones([], dtype=npdt),
off_value=np.zeros([], dtype=npdt)) # [n, B, k]
mask = distribution_utils.pad_mixture_dimensions(
mask, self, self._cat,
self._static_event_shape.ndims) # [n, B, k, [1]*e]
return math_ops.reduce_sum(
x * mask,
axis=-1 - self._static_event_shape.ndims) # [n, B, E]
with ops.control_dependencies(self._assertions):
n = ops.convert_to_tensor(n, name="n")
static_n = tensor_util.constant_value(n)
n = int(static_n) if static_n is not None else n
cat_samples = self.cat.sample(n, seed=seed)
static_samples_shape = cat_samples.get_shape()
if static_samples_shape.is_fully_defined():
samples_shape = static_samples_shape.as_list()
samples_size = static_samples_shape.num_elements()
else:
samples_shape = array_ops.shape(cat_samples)
samples_size = array_ops.size(cat_samples)
static_batch_shape = self.batch_shape
if static_batch_shape.is_fully_defined():
batch_shape = static_batch_shape.as_list()
batch_size = static_batch_shape.num_elements()
else:
batch_shape = self.batch_shape_tensor()
batch_size = math_ops.reduce_prod(batch_shape)
static_event_shape = self.event_shape
if static_event_shape.is_fully_defined():
event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
else:
event_shape = self.event_shape_tensor()
# Get indices into the raw cat sampling tensor. We will
# need these to stitch sample values back out after sampling
# within the component partitions.
samples_raw_indices = array_ops.reshape(
math_ops.range(0, samples_size), samples_shape)
# Partition the raw indices so that we can use
# dynamic_stitch later to reconstruct the samples from the
# known partitions.
partitioned_samples_indices = data_flow_ops.dynamic_partition(
data=samples_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
# Copy the batch indices n times, as we will need to know
# these to pull out the appropriate rows within the
# component partitions.
batch_raw_indices = array_ops.reshape(
array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)
# Explanation of the dynamic partitioning below:
# batch indices are i.e., [0, 1, 0, 1, 0, 1]
# Suppose partitions are:
# [1 1 0 0 1 1]
# After partitioning, batch indices are cut as:
# [batch_indices[x] for x in 2, 3]
# [batch_indices[x] for x in 0, 1, 4, 5]
# i.e.
# [1 1] and [0 0 0 0]
# Now we sample n=2 from part 0 and n=4 from part 1.
# For part 0 we want samples from batch entries 1, 1 (samples 0, 1),
# and for part 1 we want samples from batch entries 0, 0, 0, 0
# (samples 0, 1, 2, 3).
partitioned_batch_indices = data_flow_ops.dynamic_partition(
data=batch_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
samples_class = [None for _ in range(self.num_components)]
for c in range(self.num_components):
n_class = array_ops.size(partitioned_samples_indices[c])
seed = distribution_util.gen_new_seed(seed, "mixture")
samples_class_c = self.components[c].sample(n_class, seed=seed)
# Pull out the correct batch entries from each index.
# To do this, we may have to flatten the batch shape.
# For sample s, batch element b of component c, we get the
# partitioned batch indices from
# partitioned_batch_indices[c]; and shift each element by
# the sample index. The final lookup can be thought of as
# a matrix gather along locations (s, b) in
# samples_class_c where the n_class rows correspond to
# samples within this component and the batch_size columns
# correspond to batch elements within the component.
#
# Thus the lookup index is
# lookup[c, i] = batch_size * s[i] + b[c, i]
# for i = 0 ... n_class[c] - 1.
lookup_partitioned_batch_indices = (
batch_size * math_ops.range(n_class) +
partitioned_batch_indices[c])
samples_class_c = array_ops.reshape(
samples_class_c,
array_ops.concat([[n_class * batch_size], event_shape], 0))
samples_class_c = array_ops.gather(
samples_class_c, lookup_partitioned_batch_indices,
name="samples_class_c_gather")
samples_class[c] = samples_class_c
# Stitch back together the samples across the components.
lhs_flat_ret = data_flow_ops.dynamic_stitch(
indices=partitioned_samples_indices, data=samples_class)
# Reshape back to proper sample, batch, and event shape.
ret = array_ops.reshape(lhs_flat_ret,
array_ops.concat([samples_shape,
self.event_shape_tensor()], 0))
ret.set_shape(
tensor_shape.TensorShape(static_samples_shape).concatenate(
self.event_shape))
return ret
def entropy_lower_bound(self, name="entropy_lower_bound"):
r"""A lower bound on the entropy of this mixture model.
The bound below is not always very tight, and its usefulness depends
on the mixture probabilities and the components in use.
A lower bound is useful for ELBO when the `Mixture` is the variational
distribution:
\\(
\log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
\\)
where \\( p \\) is the prior distribution, \\( q \\) is the variational,
and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
\\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
place of \\( H[q] \\).
For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
\\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
simple lower bound is:
\\(
\begin{align}
H[q] & = - \int q(z) \log q(z) dz \\\
& = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
& \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
& = \sum_i c_i H[q_i]
\end{align}
\\)
This is the term we calculate below for \\( G[q] \\).
Args:
name: A name for this operation (optional).
Returns:
A lower bound on the Mixture's entropy.
"""
with self._name_scope(name, values=[self.cat.logits]):
with ops.control_dependencies(self._assertions):
distribution_entropies = [d.entropy() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
partial_entropies = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
]
# These are all the same shape by virtue of matching batch_shape
return math_ops.add_n(partial_entropies)
def _cat_probs(self, log_probs):
"""Get a list of num_components batchwise probabilities."""
which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
cat_probs = which_softmax(self.cat.logits)
cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
return cat_probs
| apache-2.0 |
Barmaley-exe/scikit-learn | sklearn/linear_model/logistic.py | 3 | 55888 | """
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
# Lars Buitinck
# Simon Wu <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
squared_norm)
from ..utils.optimize import newton_cg
from ..utils.validation import (as_float_array, DataConversionWarning,
check_X_y)
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import _check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
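# A hypothetical sanity check (illustrative sketch, not part of the original module):
# verify the analytic gradient returned by _logistic_loss_and_grad against finite
# differences on a small random problem. The helper name and toy data are assumptions.
def _check_logistic_gradient_example():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 3)
    y = rng.choice([-1., 1.], size=20)
    w = rng.randn(4)  # 3 coefficients plus an intercept term
    alpha = 1.0
    # check_grad returns the norm of the difference between the analytic gradient
    # and a finite-difference approximation; it should be tiny (roughly 1e-6).
    return optimize.check_grad(
        lambda w_: _logistic_loss(w_, X, y, alpha),
        lambda w_: _logistic_loss_and_grad(w_, X, y, alpha)[1],
        w)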
def _logistic_loss_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss, gradient and the Hessian.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return out, grad, Hs
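# Hypothetical illustration (assumed helper, not part of the original module): the
# Hessian-vector product Hs returned above can be cross-checked against a finite
# difference of the gradient, i.e. Hs(s) ~ (grad(w + eps * s) - grad(w)) / eps.
def _check_logistic_hessian_example(eps=1e-6):
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = rng.choice([-1., 1.], size=30)
    w = rng.randn(5)   # 4 coefficients plus an intercept term
    s = rng.randn(5)   # arbitrary direction for the Hessian-vector product
    alpha = 0.5
    _, grad, Hs = _logistic_loss_grad_hess(w, X, y, alpha)
    _, grad_eps = _logistic_loss_and_grad(w + eps * s, X, y, alpha)
    # The maximum absolute deviation should be of the order of eps.
    return np.max(np.abs(Hs(s) - (grad_eps - grad) / eps))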
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_loss_grad_hess(w, X, Y, alpha, sample_weight):
"""
Provides multinomial loss, gradient, and a function for computing hessian
vector product.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or (n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return loss, grad, hessp
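# A minimal assumed sketch (not part of the original module): the multinomial helpers
# expect the one-hot matrix produced by LabelBinarizer and an explicit sample_weight
# vector. With an all-zero coefficient vector every class probability is uniform.
def _multinomial_loss_example():
    X = np.random.RandomState(0).randn(15, 4)
    y = np.arange(15) % 3
    Y = LabelBinarizer().fit_transform(y)      # shape (15, 3)
    w = np.zeros(3 * (4 + 1))                  # layout used when fit_intercept=True
    sample_weight = np.ones(15)
    loss, p, _ = _multinomial_loss(w, X, Y, 1.0, sample_weight)
    # p is 1/3 everywhere and loss equals 15 * log(3) for this zero coefficient vector.
    return loss, p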
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr'):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
True will be useful in cases, when logistic_regression_path
is called repeatedly with the same data, as y is modified
along the path.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class can be either 'multinomial' or 'ovr'"
"got %s" % multi_class)
if solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg and lbfgs solvers. got %s" % solver)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s cannot solve problems with "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("newton-cg and lbfgs solvers support only "
"l2 penalties, got %s penalty." % penalty)
if dual:
raise ValueError("newton-cg and lbfgs solvers support only "
"dual=False, got dual=%s" % dual)
# Preprocessing.
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
_, n_features = X.shape
check_consistent_length(X, y)
classes = np.unique(y)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
# the class_weights are assigned after masking the labels with a OvR.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
    # For doing an OvR, we need to mask the labels first. For the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = [-1, 1]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
        # To take care of object dtypes, i.e. 1 and -1 are in the form of
# strings.
y = as_float_array(y, copy=False)
else:
lbin = LabelBinarizer()
Y_bin = lbin.fit_transform(y)
if Y_bin.shape[1] == 1:
Y_bin = np.hstack([1 - Y_bin, Y_bin])
w0 = np.zeros((Y_bin.shape[1], n_features + int(fit_intercept)),
order='F')
mask_classes = classes
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, mask_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size)
)
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_vectors = classes.size
if n_vectors == 2:
n_vectors = 1
if (coef.shape[0] != n_vectors or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1
)
)
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
        # fmin_l_bfgs_b and newton-cg accept only ravelled parameters.
w0 = w0.ravel()
target = Y_bin
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_loss_grad_hess
else:
target = y
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_loss_grad_hess
coefs = list()
for C in Cs:
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter
)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol
)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0 = newton_cg(hess, func, grad, w0, args=args, maxiter=max_iter,
tol=tol)
elif solver == 'liblinear':
coef_, intercept_, _, = _fit_liblinear(
X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol,
)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0)
return coefs, np.array(Cs)
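# Hypothetical usage sketch (assumed example, not part of the original module): trace the
# coefficients of a binary problem along a small grid of C values using the path function
# defined above. The helper name and toy data are illustrative assumptions only.
def _logistic_regression_path_example():
    rng = np.random.RandomState(0)
    X = rng.randn(50, 3)
    y = (X[:, 0] + 0.1 * rng.randn(50) > 0).astype(np.float64)
    coefs, Cs = logistic_regression_path(X, y, Cs=[0.1, 1.0, 10.0], solver='lbfgs')
    # One coefficient vector per C; with fit_intercept=True (the default) the last
    # entry of each vector is the intercept.
    return [c.shape for c in coefs], Cs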
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.,
multi_class='ovr'):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
True will be useful in cases, when ``_log_reg_scoring_path`` is called
repeatedly with the same data, as y is modified along the path.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
log_reg = LogisticRegression(fit_intercept=fit_intercept)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
multi_class=multi_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the
cross-entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs' and
'newton-cg' solvers.)
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C")
self.classes_ = np.unique(y)
if self.solver not in ['liblinear', 'newton-cg', 'lbfgs']:
raise ValueError(
"Logistic Regression supports only liblinear, newton-cg and "
"lbfgs solvers, Got solver=%s" % self.solver
)
if self.solver == 'liblinear' and self.multi_class == 'multinomial':
raise ValueError("Solver %s does not support a multinomial "
"backend." % self.solver)
if self.multi_class not in ['ovr', 'multinomial']:
raise ValueError("multi_class should be either ovr or multinomial "
"got %s" % self.multi_class)
if self.solver == 'liblinear':
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol
)
return self
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
for ind, class_ in enumerate(classes_):
coef_, _ = logistic_regression_path(
X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight)
self.coef_.append(coef_[0])
self.coef_ = np.squeeze(self.coef_)
        # For the binary case, this gets squeezed to a 1-D array.
if self.coef_.ndim == 1:
self.coef_ = self.coef_[np.newaxis, :]
self.coef_ = np.asarray(self.coef_)
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
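# Hypothetical usage sketch (an assumed example, not part of the original module): fit the
# estimator defined above on a small synthetic binary problem. The helper name and the
# toy data are illustrative assumptions only.
def _logistic_regression_usage_example():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 4)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    clf = LogisticRegression(C=1.0, solver='lbfgs').fit(X, y)
    # predict_proba rows sum to one; column order follows clf.classes_.
    return clf.predict(X[:5]), clf.predict_proba(X[:5])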
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm start along the path, i.e., we guess the initial coefficients of the
    present fit to be the coefficients obtained after convergence in the previous
fit, so in general it is supposed to be faster.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores got by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs'
solver.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr'):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if self.solver != 'liblinear':
if self.penalty != 'l2':
raise ValueError("newton-cg and lbfgs solvers support only "
"l2 penalties.")
if self.dual:
raise ValueError("newton-cg and lbfgs solvers support only "
"the primal form.")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, dtype=None)
if self.multi_class not in ['ovr', 'multinomial']:
raise ValueError("multi_class backend should be either "
"'ovr' or 'multinomial'"
" got %s" % self.multi_class)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning
)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = _check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight == 'auto'):
raise ValueError("class_weight provided should be a "
"dict or 'auto'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=self.class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
            # Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
else:
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
class_weight=self.class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([
coefs_paths[i][best_indices[i]]
for i in range(len(folds))
], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
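# Hypothetical usage sketch (assumed example, not part of the original module): select C
# by cross-validation on a small synthetic binary problem using the class defined above.
def _logistic_regression_cv_usage_example():
    rng = np.random.RandomState(0)
    X = rng.randn(120, 5)
    y = (X[:, 0] - X[:, 2] > 0).astype(int)
    clf = LogisticRegressionCV(Cs=5, cv=3, solver='lbfgs').fit(X, y)
    # C_ holds the selected inverse regularization strength; scores_ maps each class
    # label to a (n_folds, len(Cs_)) grid of cross-validation scores.
    return clf.C_, clf.scores_[1].shape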
| bsd-3-clause |
jcrudy/sklearntools | sklearntools/sym/adapters/isotonic_regression.py | 1 | 3641 | import bisect
from nose.tools import assert_almost_equal
from sympy.core.numbers import RealNumber
from sympy.functions.elementary.piecewise import Piecewise
from sklearntools.sym.syms import syms, register_syms
from sklearntools.sym.base import NAN
from sympy.core.symbol import Symbol
from sklearntools.sym.printers import model_to_code, exec_module
from sklearntools.sym.sym_predict import register_sym_predict
from sklearn.isotonic import IsotonicRegression
from numpy.testing.utils import assert_array_almost_equal
import numpy as np
from ..input_size import register_input_size
@register_syms(IsotonicRegression)
def syms_isotonic_regression(estimator):
return [Symbol('x')]
@register_input_size(IsotonicRegression)
def input_size_isotonic_regression(estimator):
return 1
def sym_linear_interp(variable, lower_x, upper_x, lower_y, upper_y):
slope = RealNumber((upper_y - lower_y) / (upper_x - lower_x))
return slope * (variable - RealNumber(lower_x)) + RealNumber(lower_y)
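# Small assumed illustration (not part of the original module): interpolating between the
# points (0, 0) and (2, 4) reduces to the sympy expression 2.0*x.
def _sym_linear_interp_example():
    x = Symbol('x')
    return sym_linear_interp(x, 0.0, 2.0, 0.0, 4.0)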
@register_sym_predict(IsotonicRegression)
def sym_predict_isotonic_regression(estimator):
variable = syms(estimator)[0]
pieces = []
try:
x_upper = estimator.f_.x[0]
y_upper = estimator.f_.y[0]
except AttributeError:
return RealNumber(estimator.f_(np.array([0.]))[0])
i = 0
n = len(estimator.f_.x)
if estimator.out_of_bounds == 'clip':
pieces.append((y_upper, variable < RealNumber(x_upper)))
elif estimator.out_of_bounds == 'nan':
pieces.append((NAN(), variable < RealNumber(x_upper)))
else:
raise ValueError('out_of_bounds=%s not supported.' % estimator.out_of_bounds)
while i < (n-1):
i += 1
x_lower = x_upper
y_lower = y_upper
x_upper = estimator.f_.x[i]
y_upper = estimator.f_.y[i]
pieces.append((sym_linear_interp(variable, x_lower, x_upper, y_lower, y_upper), (RealNumber(x_lower) <= variable) & (variable <= RealNumber(x_upper))))
if estimator.out_of_bounds == 'clip':
pieces.append((y_upper, variable >= RealNumber(x_upper)))
elif estimator.out_of_bounds == 'nan':
pieces.append((NAN(), variable > RealNumber(x_upper)))
else:
raise ValueError('out_of_bounds=%s not supported.' % estimator.out_of_bounds)
return Piecewise(*pieces)
def predict_isotonic(estimator, value):
i = bisect.bisect(estimator.f_.x, value)
if i == 0:
return estimator.f_.y[0]
elif i>=len(estimator.f_.y):
return estimator.f_.y[-1]
else:
lower_y = estimator.f_.y[i-1]
upper_y = estimator.f_.y[i]
lower_x = estimator.f_.x[i-1]
upper_x = estimator.f_.x[i]
slope = (upper_y - lower_y) / (upper_x - lower_x)
return lower_y + slope * (value - lower_x)
# if __name__ == '__main__':
# from sklearntools.calibration import IsotonicRegressor
# import numpy as np
# X = np.random.normal(size=1000) + 100
# y = np.random.normal(X ** 2, .1)
# estimator = IsotonicRegressor(out_of_bounds='clip').fit(X, y)
# for v in np.arange(-10,10,.1):
# assert_almost_equal(predict_isotonic(estimator, v), estimator.predict([v])[0])
#
# code = model_to_code(estimator, 'numpy', 'predict', 'test_model')
# numpy_test_module = exec_module('numpy_test_module', code)
# y_pred_numpy = numpy_test_module.test_model(x=np.ravel(np.asarray(X)))
# y_pred = estimator.predict(np.ravel(np.asarray(X)))
# y_pred_test = [predict_isotonic(estimator, v) for v in X]
# assert_array_almost_equal(np.ravel(y_pred_numpy), np.ravel(y_pred))
# print('Success!')
# | bsd-3-clause |
pratapvardhan/scikit-learn | sklearn/linear_model/passive_aggressive.py | 60 | 10566 | # Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
    fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
.. versionadded:: 0.17
parameter *class_weight* to automatically weight samples.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None):
super(PassiveAggressiveClassifier, self).__init__(
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
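# Hypothetical usage sketch (assumed example, not part of the original module): the helper
# name and toy data below are illustrative assumptions only.
def _passive_aggressive_classifier_example():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 3)
    y = (X[:, 0] > 0).astype(int)
    clf = PassiveAggressiveClassifier(C=1.0, n_iter=5, random_state=0).fit(X, y)
    # Mean accuracy on the training data (score is inherited from the classifier mixin).
    return clf.score(X, y)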
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
    coef_ : array, shape = [n_features]
        Weights assigned to the features.
    intercept_ : array, shape = [1]
        Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False):
super(PassiveAggressiveRegressor, self).__init__(
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
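# --- Hedged usage sketch (added for illustration; not part of the original
# module). Shows a minimal fit/predict round trip on toy data; the C,
# epsilon and n_iter values below are arbitrary example choices.
def _passive_aggressive_regressor_example():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    y = X.sum(axis=1)  # simple linear target
    reg = PassiveAggressiveRegressor(C=1.0, epsilon=0.1, n_iter=5,
                                     random_state=0)
    reg.fit(X, y)  # uses PA-I since loss defaults to "epsilon_insensitive"
    return reg.predict(X[:2])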
| bsd-3-clause |
henridwyer/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
    ----------
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
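# Hedged sketch (added for illustration; not part of the original module):
# for a 2x2 grid with a single z-slice the helper returns the four
# neighbouring-pixel edges, using the raveled (n_x, n_y, n_z) vertex ids.
def _make_edges_3d_example():
    edges = _make_edges_3d(2, 2, 1)
    # edges == array([[0, 2, 0, 1],
    #                 [1, 3, 2, 3]])  -> edges (0,1), (2,3), (0,2), (1,3)
    return edges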
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = astype(mask, dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
        The dtype of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
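# Hedged usage sketch (added for illustration; not part of the original
# module): the adjacency of a 4x4 toy image is a 16x16 sparse matrix whose
# off-diagonal entries are the absolute gradients between neighbouring
# pixels (see _compute_gradient_3d above).
def _img_to_graph_example():
    img = np.arange(16, dtype=np.float64).reshape(4, 4)
    graph = img_to_graph(img)
    return graph.shape  # (16, 16)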
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
        The dtype of the returned sparse matrix. By default it is int
Notes
-----
For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
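# Hedged sketch (added for illustration; not part of the original module):
# a 10x10 image admits 8 * 8 == 64 distinct 3x3 patches; a float
# max_patches keeps that fraction, an integer caps the count directly.
def _compute_n_patches_example():
    assert _compute_n_patches(10, 10, 3, 3) == 64
    assert _compute_n_patches(10, 10, 3, 3, max_patches=0.5) == 32
    assert _compute_n_patches(10, 10, 3, 3, max_patches=10) == 10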
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
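# Hedged sketch (added for illustration; not part of the original module):
# all 2x2 patches of a 4x4 array come back as a (3, 3, 2, 2) strided view;
# reshaping the leading dimensions copies the data into 9 explicit patches.
def _extract_patches_example():
    arr = np.arange(16).reshape(4, 4)
    patches = extract_patches(arr, patch_shape=2, extraction_step=1)
    return patches.shape, patches.reshape(-1, 2, 2).shape  # (3, 3, 2, 2), (9, 2, 2)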
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling; only
        used if `max_patches` is not None.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
if p_h > i_h:
raise ValueError("Height of the patch should be less than the height"
" of the image.")
if p_w > i_w:
raise ValueError("Width of the patch should be less than the width"
" of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
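# Hedged sketch (added for illustration; not part of the original module):
# keeping every overlapping patch makes the extraction invertible, because
# reconstruct_from_patches_2d averages the overlaps back to the originals.
def _patch_roundtrip_example():
    image = np.arange(16, dtype=np.float64).reshape(4, 4)
    patches = extract_patches_2d(image, (2, 2))
    reconstructed = reconstruct_from_patches_2d(patches, (4, 4))
    return np.allclose(image, reconstructed)  # True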
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches: array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
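# Hedged usage sketch (added for illustration; not part of the original
# module): drawing two random 8x8 patches from each of five toy grayscale
# images yields a (10, 8, 8) array.
def _patch_extractor_example():
    images = np.random.RandomState(0).rand(5, 32, 32)
    extractor = PatchExtractor(patch_size=(8, 8), max_patches=2,
                               random_state=0)
    return extractor.transform(images).shape  # (10, 8, 8)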
| bsd-3-clause |
sunyihuan326/DeltaLab | shuwei_fengge/practice_five/model/main.py | 1 | 2519 | # coding:utf-8
'''
Created on 2018/1/15.
@author: chk01
'''
from shuwei_fengge.practice_two.load_data.utils import *
from PIL import Image, ImageDraw
import tensorflow as tf
import matplotlib.pyplot as plt
def get_face_box(points):
X = points[:, 0]
Y = points[:, 1]
min_x = min(X)
max_x = max(X)
min_y = min(Y)
max_y = max(Y)
wid = max(max_y - min_y, max_x - min_x)
wid = 1.8 * wid
new_x = min_x - (wid - (max_x - min_x)) // 2
new_y = min_y - (wid - (max_y - min_y)) // 2
p = 0.2
region = [new_x, new_y - p * wid, new_x + wid, new_y + (1 - p) * wid]
return region, wid
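# Hedged sketch (added for illustration; not part of the original script):
# get_face_box grows the landmark bounding square by 1.8x and shifts it so
# that 20% of the square sits above the landmarks. The landmark values
# below are made up.
def _get_face_box_example():
    import numpy as np
    pts = np.array([[10, 20], [60, 70], [35, 45]])
    region, wid = get_face_box(pts)
    # wid == 90.0 and region == [-10.0, -18.0, 80.0, 72.0]
    return region, wid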
def main():
img_path = '7.jpg'
image = Image.open(img_path).convert("L")
points = get_landmark72(img_path)
region, width = get_face_box(points)
new_x = region[0]
new_y = region[1]
res = np.array(image.crop(region).resize([64, 64]))
tt = np.squeeze(predict(res)).reshape(-1, 2) * width / 64 + [new_x, new_y]
plt.scatter(points[:, 0], -points[:, 1])
plt.scatter(tt[:, 0], -tt[:, 1])
plt.axis('equal')
plt.show()
drawSurface = ImageDraw.Draw(image)
landmark72 = tuple(tuple(t) for t in tt)
rr = tuple(tuple(t) for t in points)
drawSurface.line(rr[:13], fill=255, width=5)
# drawSurface.polygon([landmark72[2:5],landmark72[-3]], fill=255)
    drawSurface.line(landmark72, fill=255, width=5)
image.save(img_path.replace('.jpg', 'res.png'))
image.show()
def predict(trX):
# file = '../data/face_top_9.mat'
# data = scio.loadmat(file)
tf.reset_default_graph()
# graph
saver = tf.train.import_meta_graph("save/model-2000-2.ckpt.meta")
# value
# a = tf.train.NewCheckpointReader('save/model.ckpt.index')
# saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, "save/model-2000-2.ckpt")
graph = tf.get_default_graph()
predict_op = graph.get_tensor_by_name("output/BiasAdd:0")
X = graph.get_tensor_by_name("Placeholder:0")
# dp = graph.get_tensor_by_name("Placeholder_2:0")
resY = predict_op.eval({X: trX.reshape(1, -1) / 255.})
# resY=[[31,10]]
print(resY)
# resY = [[14.34780979, 32.37727928, 17.39715767, 22.06736565, 23.70981216,
# 17.21895123, 29.31753731, 16.67663288, 31.93413925, 14.36086273,
# 48.92932129, 29.01085472, 45.96300888, 21.74747467, 42.84361649,
# 17.86888313, 34.78334045, 14.6940918]]
return resY
if __name__ == '__main__':
main()
| mit |
bretttegart/treadmill | lib/python/treadmill/cli/scheduler/__init__.py | 1 | 2585 | """Top level command for Treadmill reports.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import click
import pandas as pd
import tabulate
from six.moves import urllib_parse
from treadmill import cli
from treadmill import context
from treadmill import plugin_manager
from treadmill import restclient
def fetch_report(cell_api, report_type, match=None, partition=None):
"""Fetch a report of the given type and return it as a DataFrame."""
api_urls = context.GLOBAL.cell_api(cell_api)
path = '/scheduler/{}'.format(report_type)
query = {}
if match:
query['match'] = match
if partition:
query['partition'] = partition
if query:
path += '?' + urllib_parse.urlencode(query)
response = restclient.get(api_urls, path).json()
return pd.DataFrame(response['data'], columns=response['columns'])
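# Hedged usage sketch (added for illustration; not part of the original
# module): fetching one scheduler report and pretty-printing it. The
# 'servers' report type and the match pattern are assumed example values,
# and a configured Treadmill cell context is required.
def _fetch_report_example(cell_api=None):
    frame = fetch_report(cell_api, 'servers', match='*.example.*')
    print_report(frame)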
def print_report(frame):
"""Pretty-print the report."""
if cli.OUTPUT_FORMAT is None:
frame.replace(True, ' ', inplace=True)
frame.replace(False, 'X', inplace=True)
dict_ = frame.to_dict(orient='split')
del dict_['index']
cli.out(
tabulate.tabulate(
dict_['data'], dict_['columns'], tablefmt='simple'
)
)
cli.echo_green('\nX: designates the factor that prohibits scheduling '
'the instance on the given server')
elif cli.OUTPUT_FORMAT == 'yaml':
fmt = plugin_manager.load('treadmill.formatters', 'yaml')
cli.out(fmt.format(frame.to_dict(orient='records')))
elif cli.OUTPUT_FORMAT == 'json':
cli.out(frame.to_json(orient='records'))
elif cli.OUTPUT_FORMAT == 'csv':
cli.out(frame.to_csv(index=False))
else:
cli.out(tabulate.tabulate(frame, frame.columns, tablefmt='simple'))
def init():
"""Return top level command handler."""
@click.group(cls=cli.make_commands(__name__))
@click.option(
'--cell',
help='Treadmill cell',
envvar='TREADMILL_CELL',
callback=cli.handle_context_opt,
expose_value=False,
required=True
)
@click.option(
'--api',
help='Cell API URL',
metavar='URL',
envvar='TREADMILL_CELLAPI'
)
@click.pass_context
def run(ctx, api):
"""Report scheduler state."""
if not ctx.obj:
ctx.obj = {} # Doesn't seem to exist in testing
ctx.obj['api'] = api
return run
| apache-2.0 |
daodaoliang/neural-network-animation | matplotlib/tests/test_backend_pdf.py | 10 | 2894 | # -*- encoding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import io
import os
import numpy as np
from matplotlib import cm, rcParams
from matplotlib import pyplot as plt
from matplotlib.testing.decorators import (image_comparison, knownfailureif,
cleanup)
if 'TRAVIS' not in os.environ:
@image_comparison(baseline_images=['pdf_use14corefonts'],
extensions=['pdf'])
def test_use14corefonts():
rcParams['pdf.use14corefonts'] = True
rcParams['font.family'] = 'sans-serif'
rcParams['font.size'] = 8
rcParams['font.sans-serif'] = ['Helvetica']
rcParams['pdf.compression'] = 0
text = '''A three-line text positioned just above a blue line
and containing some French characters and the euro symbol:
"Merci pépé pour les 10 €"'''
@cleanup
def test_type42():
rcParams['pdf.fonttype'] = 42
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
fig.savefig(io.BytesIO())
@cleanup
def test_multipage_pagecount():
from matplotlib.backends.backend_pdf import PdfPages
with PdfPages(io.BytesIO()) as pdf:
assert pdf.get_pagecount() == 0
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
fig.savefig(pdf, format="pdf")
assert pdf.get_pagecount() == 1
pdf.savefig()
assert pdf.get_pagecount() == 2
@cleanup
def test_multipage_keep_empty():
from matplotlib.backends.backend_pdf import PdfPages
from tempfile import NamedTemporaryFile
### test empty pdf files
# test that an empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
assert os.path.exists(filename)
os.remove(filename)
# test if an empty pdf is deleting itself afterwards with keep_empty=False
with PdfPages(filename, keep_empty=False) as pdf:
pass
assert not os.path.exists(filename)
### test pdf files with content, they should never be deleted
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
# test that a non-empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
# test that a non-empty pdf is left behind with keep_empty=False
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp, keep_empty=False) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
| mit |
ryfeus/lambda-packs | Pandas_numpy/source/pandas/io/parquet.py | 1 | 8906 | """ parquet compat """
from warnings import catch_warnings
from distutils.version import LooseVersion
from pandas import DataFrame, RangeIndex, Int64Index, get_option
from pandas.compat import string_types
from pandas.core.common import AbstractMethodError
from pandas.io.common import get_filepath_or_buffer
def get_engine(engine):
""" return our implementation """
if engine == 'auto':
engine = get_option('io.parquet.engine')
if engine == 'auto':
# try engines in this order
try:
return PyArrowImpl()
except ImportError:
pass
try:
return FastParquetImpl()
except ImportError:
pass
raise ImportError("Unable to find a usable engine; "
"tried using: 'pyarrow', 'fastparquet'.\n"
"pyarrow or fastparquet is required for parquet "
"support")
if engine not in ['pyarrow', 'fastparquet']:
raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
if engine == 'pyarrow':
return PyArrowImpl()
elif engine == 'fastparquet':
return FastParquetImpl()
class BaseImpl(object):
api = None # module
@staticmethod
def validate_dataframe(df):
if not isinstance(df, DataFrame):
raise ValueError("to_parquet only supports IO with DataFrames")
# must have value column names (strings only)
if df.columns.inferred_type not in {'string', 'unicode'}:
raise ValueError("parquet must have string column names")
# index level names must be strings
valid_names = all(
isinstance(name, string_types)
for name in df.index.names
if name is not None
)
if not valid_names:
raise ValueError("Index level names must be strings")
def write(self, df, path, compression, **kwargs):
raise AbstractMethodError(self)
def read(self, path, columns=None, **kwargs):
raise AbstractMethodError(self)
class PyArrowImpl(BaseImpl):
def __init__(self):
# since pandas is a dependency of pyarrow
# we need to import on first use
try:
import pyarrow
import pyarrow.parquet
except ImportError:
raise ImportError(
"pyarrow is required for parquet support\n\n"
"you can install via conda\n"
"conda install pyarrow -c conda-forge\n"
"\nor via pip\n"
"pip install -U pyarrow\n"
)
if LooseVersion(pyarrow.__version__) < '0.4.1':
raise ImportError(
"pyarrow >= 0.4.1 is required for parquet support\n\n"
"you can install via conda\n"
"conda install pyarrow -c conda-forge\n"
"\nor via pip\n"
"pip install -U pyarrow\n"
)
self._pyarrow_lt_060 = (
LooseVersion(pyarrow.__version__) < LooseVersion('0.6.0'))
self._pyarrow_lt_070 = (
LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'))
self.api = pyarrow
def write(self, df, path, compression='snappy',
coerce_timestamps='ms', **kwargs):
self.validate_dataframe(df)
if self._pyarrow_lt_070:
self._validate_write_lt_070(df)
path, _, _ = get_filepath_or_buffer(path)
if self._pyarrow_lt_060:
table = self.api.Table.from_pandas(df, timestamps_to_ms=True)
self.api.parquet.write_table(
table, path, compression=compression, **kwargs)
else:
table = self.api.Table.from_pandas(df)
self.api.parquet.write_table(
table, path, compression=compression,
coerce_timestamps=coerce_timestamps, **kwargs)
def read(self, path, columns=None, **kwargs):
path, _, _ = get_filepath_or_buffer(path)
if self._pyarrow_lt_070:
return self.api.parquet.read_pandas(path, columns=columns,
**kwargs).to_pandas()
kwargs['use_pandas_metadata'] = True
return self.api.parquet.read_table(path, columns=columns,
**kwargs).to_pandas()
def _validate_write_lt_070(self, df):
# Compatibility shim for pyarrow < 0.7.0
# TODO: Remove in pandas 0.22.0
from pandas.core.indexes.multi import MultiIndex
if isinstance(df.index, MultiIndex):
msg = (
"Multi-index DataFrames are only supported "
"with pyarrow >= 0.7.0"
)
raise ValueError(msg)
# Validate index
if not isinstance(df.index, Int64Index):
msg = (
"pyarrow < 0.7.0 does not support serializing {} for the "
"index; you can .reset_index() to make the index into "
"column(s), or install the latest version of pyarrow or "
"fastparquet."
)
raise ValueError(msg.format(type(df.index)))
if not df.index.equals(RangeIndex(len(df))):
raise ValueError(
"pyarrow < 0.7.0 does not support serializing a non-default "
"index; you can .reset_index() to make the index into "
"column(s), or install the latest version of pyarrow or "
"fastparquet."
)
if df.index.name is not None:
raise ValueError(
"pyarrow < 0.7.0 does not serialize indexes with a name; you "
"can set the index.name to None or install the latest version "
"of pyarrow or fastparquet."
)
class FastParquetImpl(BaseImpl):
def __init__(self):
# since pandas is a dependency of fastparquet
# we need to import on first use
try:
import fastparquet
except ImportError:
raise ImportError(
"fastparquet is required for parquet support\n\n"
"you can install via conda\n"
"conda install fastparquet -c conda-forge\n"
"\nor via pip\n"
"pip install -U fastparquet"
)
if LooseVersion(fastparquet.__version__) < '0.1.0':
raise ImportError(
"fastparquet >= 0.1.0 is required for parquet "
"support\n\n"
"you can install via conda\n"
"conda install fastparquet -c conda-forge\n"
"\nor via pip\n"
"pip install -U fastparquet"
)
self.api = fastparquet
def write(self, df, path, compression='snappy', **kwargs):
self.validate_dataframe(df)
# thriftpy/protocol/compact.py:339:
# DeprecationWarning: tostring() is deprecated.
# Use tobytes() instead.
path, _, _ = get_filepath_or_buffer(path)
with catch_warnings(record=True):
self.api.write(path, df,
compression=compression, **kwargs)
def read(self, path, columns=None, **kwargs):
path, _, _ = get_filepath_or_buffer(path)
parquet_file = self.api.ParquetFile(path)
return parquet_file.to_pandas(columns=columns, **kwargs)
def to_parquet(df, path, engine='auto', compression='snappy', **kwargs):
"""
Write a DataFrame to the parquet format.
Parameters
----------
df : DataFrame
path : string
File path
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
        Parquet library to use. If 'auto', the option
        ``io.parquet.engine`` is consulted; if that option is also 'auto',
        the first installed library (pyarrow, then fastparquet) is used.
compression : str, optional, default 'snappy'
compression method, includes {'gzip', 'snappy', 'brotli'}
kwargs
Additional keyword arguments passed to the engine
"""
impl = get_engine(engine)
return impl.write(df, path, compression=compression, **kwargs)
def read_parquet(path, engine='auto', columns=None, **kwargs):
"""
Load a parquet object from the file path, returning a DataFrame.
    .. versionadded:: 0.21.0
Parameters
----------
path : string
File path
columns: list, default=None
If not None, only these columns will be read from the file.
        .. versionadded:: 0.21.1
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
        Parquet reader library to use. If 'auto', the option
        ``io.parquet.engine`` is consulted; if that option is also 'auto',
        the first installed library (pyarrow, then fastparquet) is used.
kwargs are passed to the engine
Returns
-------
DataFrame
"""
impl = get_engine(engine)
return impl.read(path, columns=columns, **kwargs)
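# Hedged usage sketch (added for illustration; not part of the original
# module): a write/read round trip through the module-level helpers. The
# file name is an arbitrary example, and either pyarrow or fastparquet must
# be installed for get_engine() to succeed.
def _parquet_roundtrip_example(path='example.parquet'):
    df = DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
    to_parquet(df, path, engine='auto', compression='snappy')
    return read_parquet(path, engine='auto', columns=['a'])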
| mit |
JudoWill/ResearchNotebooks | TreeingFunctions.py | 1 | 22362 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
%load_ext autoreload
%autoreload 2
# <codecell>
from pandas import DataFrame, Series, merge, read_csv, MultiIndex, Index, concat
from subprocess import check_call
from tempfile import NamedTemporaryFile as NTF
import os, os.path
import numpy as np
from scipy.stats import ttest_ind
from itertools import groupby,combinations
from operator import itemgetter
from Bio import Phylo
import networkx
from random import shuffle
import csv, shlex, shutil
import sys
os.chdir('/home/will/Dropbox/HIVseqs/')
sys.path.append('/home/will/HIVReportGen/AnalysisCode/')
from SeqProcessTools import read_pat_seq_data, load_training_seq_data, align_seq_data_frame
# <codecell>
import glob
pat_files = glob.glob('/home/will/HIVReportGen/Data/PatientFasta/*.fasta')
pat_seq = read_pat_seq_data(pat_files, '/home/will/HIVReportGen/Data/BlastDB/ConBseqs.txt')
training_files = glob.glob('/home/will/HIVReportGen/Data/TrainingSequences/*.fasta')
training_data = load_training_seq_data(training_files)
align_lanl = align_seq_data_frame(training_data, '/home/will/HIVReportGen/Data/BlastDB/ConBseqs.txt')
all_seqs = concat([pat_seq, align_lanl])
# <codecell>
def get_pairwise_distances(seq_series, tree_file = None, seq_file = None):
if seq_file is None:
fasta_handle = NTF()
if tree_file is None:
tree_handle = NTF()
else:
tree_handle = open(tree_file, 'w')
for (pat, visit), seq in zip(seq_series.index, seq_series.values):
nheader = '%s-%s' % (pat, visit)
fasta_handle.write('>%s\n%s\n' % (nheader, ''.join(seq)))
fasta_handle.flush()
os.fsync(fasta_handle.fileno())
cmd = 'muscle -in %(ifile)s -tree2 %(treefile)s -gapopen -2.9'
cmdlist = shlex.split(cmd % {
'ifile':fasta_handle.name,
'treefile':tree_handle.name
})
t = check_call(cmdlist)
tree = Phylo.read(open(tree_handle.name), 'newick')
seq_names = tree.get_terminals()
dmat = {}
for p1, p2 in combinations(seq_names, 2):
d = tree.distance(p1, p2)
dmat[(p1.name, p2.name)] = d
dmat[(p2.name, p1.name)] = d
return dmat
def extract_region(seq_series, start, stop):
nseqs = seq_series.map(lambda x: x[start:stop])
return nseqs
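# <codecell>
# Hedged usage sketch (added for illustration): get_pairwise_distances
# writes the series to a temporary FASTA file, runs muscle for a guide tree
# and returns distances keyed by ordered pairs of "<patient>-<visit>"
# labels. It assumes a (Patient ID, Visit number) index and a muscle
# binary on the PATH; the file name and region bounds are example values.
def pairwise_distance_example(seq_series):
    region = extract_region(seq_series, 0, 50)
    return get_pairwise_distances(region, tree_file='example_region.nwk')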
# <codecell>
def check_distance_pvals(mat_data, trop_dict):
nreps = 500
frac = 0.5
g1dist = []
g2dist = []
for (key1, key2), dist in mat_data.items():
if trop_dict[key1] and trop_dict[key2]:
g1dist.append(dist)
elif not trop_dict[key1] and not trop_dict[key2]:
g2dist.append(dist)
nitems = int(min(frac*len(g1dist), frac*len(g2dist)))
_, raw_pval = ttest_ind(g1dist, g2dist)
cor_pvals = []
for _ in range(nreps):
shuffle(g1dist)
shuffle(g2dist)
_, pval = ttest_ind(g1dist[:nitems], g2dist[:nitems])
cor_pvals.append(pval)
return raw_pval, np.mean(cor_pvals), np.mean(g1dist), np.mean(g2dist), np.std(g1dist), np.std(g2dist)
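# <codecell>
# Hedged sketch (added for illustration): check_distance_pvals splits the
# pairwise distances into within-R5 and within-X4 groups, t-tests them and
# also averages a subsampled p-value over 500 random draws; the tuple below
# unpacks its six return values.
def distance_pval_example(mat_data, trop_dict):
    raw_p, cor_p, r5_mean, x4_mean, r5_std, x4_std = \
        check_distance_pvals(mat_data, trop_dict)
    return raw_p, cor_p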
# <codecell>
pssm_data = read_csv('/home/will/HIVReportGen/Data/TrainingSequences/pssm_data.csv', index_col = [0,1])
def decide_tropism(inval):
if inval < -6.95:
return True
elif inval > -2.88:
return False
return np.nan
tropism_data = pssm_data['score'].map(decide_tropism).dropna()
trop_dict = {}
for (pat, visit), val in zip(tropism_data.index, tropism_data.values):
trop_dict[pat+'-'+visit] = val
benj_selected = []
with open('BensTropismLabels.csv') as handle:
reader = csv.DictReader(handle)
for row in reader:
trop_dict['%s-%s' % (row['Patient ID'], row['Visit'])] = row['Prediction'] == 'TRUE'
benj_selected.append((row['Patient ID'], row['Visit']))
benj_selected_index = MultiIndex.from_tuples(benj_selected, names = ['Patient ID', 'Visit number'])
# <codecell>
pure_seqs = MultiIndex.from_tuples([
('A0001','R00'),#R5 XX
('A0107','R05'),#X4 XX
('A0017','R02'),#X4 XX
('AB286955','RN'),#R5 XX
('AB287367','RN'),#R5 XX
('AB480695','RN'),#X4 XX
('AB485642','RN'),#X4 XX
('AB604946','RN'),#X4 XX
('AF042101','RN'),#X4 XX
('AY835766','RN'),#R5 XX
('AY835779','RN'),#X4 XX
('AY352275','RN'),#X4
('AY970950','RN'),#R5 XX
('DQ358809','RN'),#R5 XX
('EF057102','RN'),#X4 XX
('EF363123','RN'),#R5 XX
('JQ316126','RN'),#X4 XX
('GU647196','RN'),#X4 XX
('DQ990880','RN'),#X4 XX
], names = ['Patient ID', 'Visit number'])
equal_pure_seqs = MultiIndex.from_tuples([
('A0001','R00'),#R5 XX
('A0107','R05'),#X4 XX
('A0017','R02'),#X4 XX
('AB286955','RN'),#R5 XX
('AB287367','RN'),#R5 XX
('AB480695','RN'),#X4 XX
('AB485642','RN'),#X4 XX
('AB604946','RN'),#X4 XX
('AF042101','RN'),#X4 XX
('AY835766','RN'),#R5 XX
('AY835779','RN'),#X4 XX
('AY352275','RN'),#X4
('AY970950','RN'),#R5 XX
('DQ358809','RN'),#R5 XX
('EF057102','RN'),#X4 XX
('EF363123','RN'),#R5 XX
('JQ316126','RN'),#X4 XX
#('GU647196','RN'),#X4 XX
('DQ990880','RN'),#X4 XX
], names = ['Patient ID', 'Visit number'])
# <codecell>
def make_tree_figure(wanted_seqs, trop_dict, tree_file):
mat_data = get_pairwise_distances(wanted_seqs, tree_file = tree_file)
tree = Phylo.read(open(tree_file), 'newick')
net = Phylo.to_networkx(tree)
node_mapping = {}
clade = 1
for node in net.nodes():
if node.name is None:
node_mapping[node] = 'Clade-%i' % clade
clade += 1
else:
node_mapping[node] = node.name
new_net = networkx.relabel_nodes(net, node_mapping)
colors = []
for node in new_net.nodes():
if node.startswith('Clade'):
colors.append('w')
elif trop_dict[node]:
colors.append('g')
elif not trop_dict[node]:
colors.append('r')
else:
print node
#print colors, len(colors), len(new_net.nodes())
pos = networkx.graphviz_layout(new_net, 'twopi')
networkx.draw_networkx(new_net, pos, with_labels = False, node_color = colors)
# <codecell>
check_regions = [#('Tat-seq-align', 'The Acidic domain', 0, 20),
#('Tat-seq-align', 'Cysteine rich domain', 21, 36),
#('Tat-seq-align', 'Core domain', 37, 47),
#('Tat-seq-align', 'TAR binding domain', 48, 56),
#('Tat-seq-align', 'Domain V-72', 57, 71),
#('Tat-seq-align', 'Domain V-86', 57, 85),
#('Tat-seq-align', 'Exon II 73', 72, 100),
#('Tat-seq-align', 'Exon II 87', 86, 100),
#('Tat-seq-align', 'Transactivation', 0, 47),
#('Tat-seq-align', 'Co-factor binding', 21, 48),
#('Tat-seq-align', 'SP1 binding', 29, 54),
#('Tat-seq-align', 'Basic Region', 48, 71),
#('Tat-seq-align', 'CEBP binding', 46, 66),
#('Tat-seq-align', 'NFAT binding', 0, 25),
#('Tat-seq-align', 'DNA-PK binding', 55, 100),
#('Vpr-seq-align', 'Nuclear localization', 10, 39),
#('Vpr-seq-align', 'Cell Cycle Progression', 14, 34),
#('Vpr-seq-align', 'Tansactivation', 13, 21),
#('Vpr-seq-align', 'Viron Packaging', 28, 39),
#('Vpr-seq-align', 'Nuclear localizations', 53, 74),
#('Vpr-seq-align', 'Transactivation', 73, 80),
#('Vpr-seq-align', 'G2 arrest', 74, 94),
#('Vpr-seq-align', 'DNA binding', 84, 92),
('LTR-seq-align', 'U3', 0, 455),
('LTR-seq-align', 'R', 456, 559),
('LTR-seq-align', 'U5', 560, 612),
('LTR-seq-align', 'TAR', 454, 544),
('LTR-seq-align', 'Integration', 0, 70),
('LTR-seq-align', 'AP1-COUPs', 60, 250),
('LTR-seq-align', 'CEBP-Lef-1s', 280, 330),
('LTR-seq-align', 'SP-sites', 376, 408),
('LTR-seq-align', 'AP1-CREB', 539, 616),
('LTR-seq-align', 'Pre-SP-I', 408, 454),
('LTR-seq-align', 'Pre-SP-I-upstream-half', 408, 431),
('LTR-seq-align', 'Pre-SP-I-downstream-half', 431, 454),
('LTR-seq-align', 'GC-Box', 376, 408),
('LTR-seq-align', 'SP-I', 398, 408),
('LTR-seq-align', 'SP-II', 387, 398),
('LTR-seq-align', 'SP-III', 376, 386),
('LTR-seq-align', 'NfKB-SP-III', 349, 386),
('LTR-seq-align', 'NfKB-II-SP-III', 362, 386),
('LTR-seq-align', 'CEBP-I-NF2', 337, 359),
('LTR-seq-align', 'ATF-CREB-CEBP', 329, 349),
('LTR-seq-align', 'LEF1-CREB', 317, 337),
('LTR-seq-align', 'LEF-1', 317, 330),
('LTR-seq-align', 'ATF-CREB',329, 337),
('LTR-seq-align', 'CEBP-I', 337, 344),
('LTR-seq-align', 'ETS-1', 304, 313),
('LTR-seq-align', 'CEBP-II-USF-1', 280, 294),
('LTR-seq-align', 'AP-I-to-CEBP-II', 221, 280),
('LTR-seq-align', 'AP-I-promixal-half', 221, 251),
('LTR-seq-align', 'CEBP-II-promixal-half', 251, 280),
('LTR-seq-align', 'AP-I', 213, 221),
('LTR-seq-align', 'GRE', 191, 207),
('LTR-seq-align', 'AP-II-to-GRE', 162, 191),
('LTR-seq-align', 'AP-II', 154, 162),
('LTR-seq-align', 'COUP-to-AP-II', 131, 154),
('LTR-seq-align', 'COUP', 93, 131),
('LTR-seq-align', 'Pre-COUP', 0, 93),
('LTR-seq-align', 'Pre-COUP-upstream-half', 0, 45),
('LTR-seq-align', 'Pre-COUP-downstream-half', 45, 93),
('LTR-seq-align', 'NfKB-I', 362, 373),
('LTR-seq-align', 'NfKB-II', 349, 359),
('LTR-seq-align', 'NfKB-I-NfKB-II', 349, 373),
('LTR-seq-align', 'CEBP-I', 337, 349),
('LTR-seq-align', 'CEBP-II', 280, 289),
('LTR-seq-align', 'COUP-I', 116, 130),
('LTR-seq-align', 'COUP-II', 105, 124),
('LTR-seq-align', 'COUP-III', 92, 111),
('LTR-seq-align', 'AP-III', 118, 125),
('LTR-seq-align', 'AP-IV', 103, 110),
]
indexes = [#('All LANL Seqs', tropism_data.index),
#('BenJ Selected', benj_selected_index),
#('Benj Pure Seq', pure_seqs),
('Benj Pure Equal Seq', equal_pure_seqs),
]
# <codecell>
from itertools import product
results = []
for (ind_name, inds), (seq_col, name, start, stop) in product(indexes, check_regions):
wanted = extract_region(all_seqs.ix[inds][seq_col].dropna(), start, stop)
#print wanted.index
#print('Treeing')
prot_name = seq_col.split('-')[0]
treename = 'fixeddomaintrees/%s-%s-%s-%i.nwk' % (ind_name, prot_name, name, start)
treename = treename.replace(' ', '-')
mat_data = get_pairwise_distances(wanted, tree_file=treename)
#print('Testing')
raw_p, cor_p, r5mean, x4mean, r5std, x4std = check_distance_pvals(mat_data, trop_dict)
if seq_col.startswith('LTR'):
start = start-454
stop = stop-454
print ind_name, name, start, raw_p, cor_p
results.append((ind_name, prot_name, name,start+1, stop+1, cor_p, r5mean, x4mean, r5std, x4std))
#pat_wanted = extract_region(pat_seq.ix[tropism_data.index][seq_col].dropna(), start, stop)
#fname = '/home/will/BenSeqs/Trees/' + name.replace(' ', '-')
#plt.figure(figsize = (20,20))
#make_tree_figure(wanted, trop_dict, fname + '.tree')
#plt.title(name)
#plt.savefig( fname + '_pure_seqs.png')
# <codecell>
with open('fixed_domain_analysis_with_new_ltr.csv', 'w') as handle:
writer = csv.writer(handle)
fields = ['Sequence Set', 'Protein Name', 'Domain Name', 'Region Start', 'Region Stop', 'p-value', 'R5-Mean', 'X4-Mean', 'R5-std', 'X4-std']
writer.writerow(fields)
writer.writerows(results)
# <codecell>
#widths = [5,10,15,20,25,30,35,45,50]
#indexes = [('BenJ Selected', benj_selected_index),
# ('All LANL Seqs', tropism_data.index),
# ]
#('Benj Pure Seq', pure_seqs)
#['Sequence Set', 'Protein Name', width, 'midpoint', 'p-value']
#large_results = []
#prots = [('Vpr-seq-align', 'Vpr',range(96))]
#for (ind_name, inds), width, (seq_col, prot, positions) in product(indexes, widths, prots):
#
# for mid in positions:
# print ind_name, width, prot, mid
# start = max(int(mid-(width/2)),0)
# stop = min(int(mid+(width/2)),positions[-1])
# wanted = extract_region(all_seqs.ix[inds][seq_col].dropna(), start, stop)
# mat_data = get_pairwise_distances(wanted)
# raw_p, cor_p, r5mean, x4mean = check_distance_pvals(mat_data, trop_dict)
#
# large_results.append((ind_name, prot, width,start+1, stop+1, cor_p, r5mean, x4mean))
# <codecell>
import contextlib
from tempfile import mkdtemp
@contextlib.contextmanager
def tmp_directory(*args, **kwargs):
"""A context manager which changes the working directory to the given
path, and then changes it back to its previous value on exit.
"""
path = mkdtemp(*args, **kwargs)
try:
yield path + '/'
finally:
#shutil.rmtree(path)
pass
# <codecell>
from StringIO import StringIO
from subprocess import check_output
from Bio.Seq import Seq
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import IUPAC
def write_nexus_alignment(seq_series, handle):
seqs = []
tmp_handle = StringIO()
for (pi, vn), seq in zip(seq_series.index, seq_series.values):
nseq = ''.join(seq).replace('O', '-')
bseq = SeqRecord(Seq(nseq, alphabet=IUPAC.protein), id = '%s-%s' % (pi, vn))
seqs.append(bseq)
SeqIO.write(seqs, tmp_handle, 'nexus')
tmp_handle.seek(0)
strdata = tmp_handle.read().replace("'", '')
handle.write(strdata)
def write_mrbayes_commands(handle, alignment_path, output_path):
cmd = """begin mrbayes;
set autoclose=yes nowarn=yes;
execute %(align)s;
prset aamodelpr = mixed;
mcmc nchains = 3 ngen = 50000 samplefreq=1000 diagnfreq=100000 printfreq=100000 file=%(out)s;
sump;
sumt;
end;"""
tdict = {'align':alignment_path, 'out':output_path}
handle.write(cmd % tdict)
def run_mrbayes(cmd_path):
cmd = '/home/will/mb ' + cmd_path
check_output(shlex.split(cmd))
def reformat_nexus(inhandle, outhandle, trop_dict):
def process_tree_line(line):
parts = line.strip().split()
return 'tree %s [&R] %s\n' % (parts[1], parts[-1])
for line in inhandle: #get ri of junk
if line.strip() == 'begin trees;':
break
_ = inhandle.next() #get rid of the 'translate' line
outhandle.write('#NEXUS\n\n\n')
outhandle.write('begin states;\n')
for line in inhandle:
nline = line.strip()
if nline.startswith('tree'):
first_tree = process_tree_line(line)
break
num, seqname = nline[:-1].split(' ', 1)
try:
if trop_dict[seqname.replace('.copy', '')]:
trop = 'R5'
else:
trop = 'X4'
except KeyError:
print 'Missing ' + seqname + ' !!'
trop = 'R5'
outhandle.write('%s %s\n' % (num, trop))
outhandle.write('End;\n\n')
outhandle.write('begin trees;\n')
tree_lines = [first_tree] + [process_tree_line(line) for line in inhandle if line.strip() != 'end;']
for line in tree_lines:
outhandle.write(line)
outhandle.write('end;\n')
def run_bats(formated_nexus_path, nreps = 5000):
cmd = 'java -Xmx3000M -jar /home/will/BaTS_beta_build2.jar single %s %i %i'
out = check_output(shlex.split(cmd % (formated_nexus_path, nreps, 2)))
handle = StringIO(out)
for line in handle:
if line.startswith('Stat'):
headers = line.strip().split('\t')
break
return list(csv.DictReader(handle, fieldnames=headers, delimiter = '\t'))[:-2]
def run_MrBats_analysis(seq_series, trop_dict, tree_file):
with tmp_directory(dir = '/home/will/tmpstuf/') as tmpdir:
align_file = tmpdir + 'seqalign.nxs'
mrbayes_cmd_file = tmpdir + 'analysis.nxs'
cons_file = tmpdir + 'seqalign.nxs.con.tre'
multi_prob = tmpdir + 'seqalign.nxs.trprobs'
multi_mod = tmpdir + 'seqalign.nxs.trprobs.modified'
#print align_file
with open(align_file, 'w') as handle:
#print align_file, len(seq_series)
write_nexus_alignment(seq_series, handle)
with open(mrbayes_cmd_file, 'w') as handle:
write_mrbayes_commands(handle, align_file, align_file)
run_mrbayes(mrbayes_cmd_file)
with open(multi_prob) as inhandle:
with open(multi_mod, 'w') as ohandle:
reformat_nexus(inhandle, ohandle, trop_dict)
out = run_bats(multi_mod)
#out = [{}]
if tree_file:
shutil.copy(cons_file, tree_file)
return out
# <codecell>
from copy import deepcopy
from concurrent.futures import ThreadPoolExecutor
from itertools import chain, imap, product
indexes = [#('BenJ Selected', benj_selected_index),
('Benj Pure Seq', pure_seqs),
('Benj Pure Equal Seq', equal_pure_seqs),
#('All LANL Seqs', tropism_data.index),
]
def linker_code(tup):
wanted_seqs, treename, extra_dict = tup
out = run_MrBats_analysis(wanted_seqs, trop_dict, tree_file = treename)
final = []
for row in out:
row.update(extra_dict)
final.append(row)
return final
#(ind_name, prot_name, name,start+1, stop+1, cor_p, r5mean, x4mean)
new_method_inputs = []
for (ind_name, inds), (seq_col, name, start, stop) in product(indexes, check_regions):
wanted = extract_region(all_seqs.ix[inds][seq_col].dropna(), start, stop)
#print wanted.index
#print('Treeing')
prot_name = seq_col.split('-')[0]
treename = 'newdomaintrees/%s-%s-%s-%i.nwk' % (ind_name, prot_name, name, start)
extra_dict = {
'IndName':ind_name,
'ProtName':prot_name,
'Domain':name,
'Start':start,
'Stop':stop,
}
treename = treename.replace(' ', '-')
#print treename, len(wanted)
if len(wanted)>10:
new_method_inputs.append((wanted.copy(), treename, deepcopy(extra_dict)))
#raise KeyError
results_so_far = []
with ThreadPoolExecutor(max_workers = 30) as executor:
    res = executor.map(linker_code, new_method_inputs)
    for row in chain.from_iterable(res):
        print row['Domain'], row['IndName'], row['Statistic'], row['significance']
        results_so_far.append(row)
# <codecell>
tmp = DataFrame(results_so_far)
tmp.to_csv('new_method_results.csv')
# <codecell>
from itertools import islice
widths = [5,10,15,20,25,30,35,45,50]
indexes = [('BenJ Selected', benj_selected_index),
('All LANL Seqs', tropism_data.index),
('Benj Pure Seq', pure_seqs),
]
prots = [('Vpr-seq-align', 'Vpr',range(96))]
def take(n, iterable):
"Return first n items of the iterable as a list"
return list(islice(iterable, n))
def yield_regions(indexes, widths, prots):
for (ind_name, inds), width, (seq_col, prot, positions) in product(indexes, widths, prots):
for mid in positions:
#print ind_name, width, prot, mid
start = max(int(mid-(width/2)),0)
stop = min(int(mid+(width/2)),positions[-1])
wanted = extract_region(all_seqs.ix[inds][seq_col].dropna(), start, stop)
extra_dict = {
'IndName':ind_name,
'ProtName':prot,
'Start':start,
'Stop':stop,
'Mid':mid,
'width':width
}
if len(wanted)>10:
yield (wanted.copy(), None, deepcopy(extra_dict))
#['Sequence Set', 'Protein Name', 'width', 'midpoint', 'p-value']
vpr_window_results = []
block_size = 500
with ThreadPoolExecutor(max_workers = 30) as executor:
iterable = yield_regions(indexes, widths, prots)
block = take(block_size, iterable)
while block:
res = executor.map(linker_code, block)
for row in chain.from_iterable(res):
print row['width'], row['IndName'], row['Statistic'] ,row['significance']
vpr_window_results.append(row)
block = take(block_size, iterable)
# <codecell>
windowed_df = DataFrame(vpr_window_results)
# <codecell>
windowed_df.head()
# <codecell>
import dendropy
fixtrees = glob.glob('newdomaintrees/*.nwk')
for f in fixtrees:
if 'Equal' not in f:
continue
with open(f) as handle:
tree = dendropy.Tree.get_from_stream(open(f), 'nexus')
tree.deroot()
rmnodes = [tree.prune_subtree(t, update_splits = True) for t in tree.leaf_nodes() if t.get_node_str().endswith("copy'")]
#tree.prune_taxa(rmnodes)
nf = f.replace('newdomaintrees', 'unrootedtrees-equal')
with open(nf, 'w') as handle:
tree.write_to_stream(handle, 'newick')
# <codecell>
tmp = list(tree.leaf_nodes())
# <codecell>
labels = [n.get_node_str() for n in rmnodes]
tree.prune_taxa_with_labels(labels, update_splits = True)
# <codecell>
[tree.prune_subtree(t) for t in tree.leaf_nodes() if t.get_node_str().endswith("copy'")]
# <codecell>
with open('/home/will/tmpstuf/tmpyw6FXN/seqalign.nxs.trprobs') as handle:
treeL = dendropy.TreeList.get_from_stream(handle, 'nexus')
# <codecell>
print treeL[0].description()
# <codecell>
with open('/home/will/tmpstuf/tmpyw6FXN/tmptree.nxs', 'w') as handle:
treeL.write_to_stream(handle, 'nexus')
# <codecell>
for tree in treeL:
for leaf in tree.leaf_iter():
print str(leaf.taxon)
# <codecell>
| mit |
glenn20/mollie-robot | rpi/rpipidtune.py | 1 | 4575 | #!/usr/bin/env python
"""
An interface to manage an Arduino controlled robot.
Classes:
ArduinoRobot: The manager of the Arduino robot.
"""
from __future__ import print_function
import itertools
import time
import json
import sys, tty, termios
import paho.mqtt.client as mqtt
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
class RobotState:
def __init__( self ):
self.time = 0
self.head = [0,0]
self.setspeed = [0,0]
self.speed = [0,0]
self.counts = [0,0]
self.power = [0,0]
self.pid = [0.7, 0.0, 0.0 ]
def update( self, s ):
print( "Line =", s, end="\r\n" )
d = json.loads( s )
self.__dict__.update( d )
# print( "Robot =", self.__dict__, end="\r\n" )
return d
def listofvalues( self ):
return list(
itertools.chain(
[self.time],
self.head,
self.setspeed,
self.speed,
self.counts,
self.power,
self.pid
)
)
def listofkeys( self ):
return [
"time",
"headX", "headY",
"setspeedL", "setspeedR",
"speedL", "speedR",
"countsL", "countsR",
"powerL", "powerR",
"Kp", "Ki", "Kd"
]
def state( self, d ):
s = json.dumps( d, separators=(',',':') )
self.__dict__.update( d )
self.time = time.time()
# print( "Target=", self.__dict__, end="\r\n" )
return s
def json( self ):
return json.dumps( self.__dict__, separators=(',',':') )
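# Hedged sketch (added for illustration; not part of the original script):
# state messages are flat JSON objects whose keys mirror RobotState
# attributes, e.g. {"setspeed": [6, 6], "power": [90, 92]}; the numbers
# here are made-up examples.
def _robotstate_roundtrip_example():
    rs = RobotState()
    sent = rs.state({"setspeed": [6, 6], "power": [90, 92]})
    return sent, rs.json()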
class MqRobot( mqtt.Client ):
"""
A simple wrapper to send robot status messages to the MQTT broker
"""
def __init__( self, clientid = None ):
super( MqRobot, self ).__init__( clientid )
self.robotstate = RobotState()
self.df = pd.DataFrame( columns=self.robotstate.listofkeys() )
self.on_connect = self._on_connect
self.on_subscribe = self._on_subscribe
self.on_message = self._on_message
self.savedata = False
# self.on_log = self._on_log
self.connect( "192.168.0.30", 1883, 60 )
self.loop_start()
self.send( {"power": [0,0], "setspeed": [0,0]} )
self.send( {"pid": [0.7, 0.0, 0.0]} )
plt.ion()
def _on_connect( self, mqttc, obj, flags, rc ):
print( "MQTT: Connected: rc: " + str( rc ),
end="\r\n" )
self.subscribe( "/mollie-robot/state", 1 )
def _on_subscribe( self, mqttc, obj, mid, granted_qos ):
print( "MQTT: Subscribed: " + str( mid ) + " " + str( granted_qos ),
end="\r\n" )
def _on_message( self, mqttc, obj, msg ):
#print( "MQTT: " + msg.topic + " " +
# str( msg.qos ) + " " + str( msg.payload ),
# end="\r\n" )
# Update the robot state
self.robotstate.state( json.loads( msg.payload ) )
if (self.savedata):
self.df.loc[len(self.df)] = self.robotstate.listofvalues()
def _on_log( self, mqttc, obj, level, string ):
print( "MQTT: " + string )
def close( self ):
self.loop_stop()
self.disconnect()
def update( self, state ):
return self.publish( "/mollie-robot/target", state, qos=0, retain=True )
def send( self, d ):
return self.update( json.dumps( d, separators=(',',':') ) )
def pidcheck( self, K ):
self.df = pd.DataFrame( columns=self.robotstate.listofkeys() )
self.savedata = True
self.send( {"pid": [K[0], K[1], K[2]]} )
self.send( {"setspeed": [6, 6]} )
time.sleep( 4 )
self.send( {"setspeed": [18, 18]} )
time.sleep( 5 )
self.send( {"setspeed": [6, 6]} )
time.sleep( 5 )
self.savedata = False
self.send( {"setspeed": [0, 0]} )
self.df.plot(
x="time",
y=["setspeedR", "speedR", "powerR"],
secondary_y=["powerR"]
#, linestyle='-', marker='o'
)
self.df.plot(
x="time",
y=["setspeedL", "speedL", "powerL"],
secondary_y=["powerL"]
#, style="o-"
)
plt.draw()
# self.df.to_csv( "pidtune-%.1f-%.1f-%.1f.csv" % (K[0], K[1], K[2]) )
if __name__ == "__main__":
robbie = MqRobot()
robbie.pidcheck( [4.0, 0.5, 1.0] )
robbie.close()
| gpl-2.0 |
walterreade/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 159 | 10196 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
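# Hedged sketch (added for illustration; not part of the original test
# module): the brute-force helper is the reference the tree queries are
# checked against; querying X against itself with k=1 returns each point
# as its own nearest neighbour at distance zero.
def _brute_force_neighbors_example():
    X = np.random.RandomState(0).rand(10, 3)
    dist, ind = brute_force_neighbors(X, X, k=1, metric='euclidean')
    return dist.shape, ind.shape  # ((10, 1), (10, 1))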
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
rnowling/pop-gen-models | bernoulli_nb/bnb_predict.py | 1 | 1645 | import sys
from sklearn.naive_bayes import BernoulliNB as BNB
import matplotlib.pyplot as plt
import numpy as np
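# Note on the assumed input format (inferred from the parser below, not from a
# documented spec): the variants file is expected to contain repeated blocks of
# a "Marker ..." header line followed, for each population, by a "Population"
# line and then one line of whitespace-separated genotype values per individual.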
def read_variants(flname):
fl = open(flname)
markers = []
individuals = []
population_ids = []
population = -1
for ln in fl:
if "Marker" in ln:
if len(individuals) == 0:
continue
marker = dict()
marker["individuals"] = np.array(individuals)
marker["population_labels"] = np.array(population_ids)
markers.append(marker)
population = -1
population_ids = []
individuals = []
elif "Population" in ln:
population += 1
else:
			individual = list(map(float, ln.strip().split()))
individuals.append(individual)
population_ids.append(population)
if len(individuals) != 0:
marker = dict()
marker["individuals"] = np.array(individuals)
marker["population_labels"] = np.array(population_ids)
markers.append(marker)
fl.close()
return markers
def predict_scores(markers, threshold=0.05):
scores = []
for i, marker in enumerate(markers):
try:
bnb = BNB()
bnb.fit(marker["individuals"], marker["population_labels"])
scores.append((bnb.score(marker["individuals"], marker["population_labels"]), i))
		except Exception:
scores.append((0.0, i))
scores.sort()
scores.reverse()
cutoff_idx = int(threshold * len(scores))
return scores[:cutoff_idx]
def write_scores(scores, flname):
fl = open(flname, "w")
	for score, loci in scores:
		fl.write("%s %s\n" % (loci, score))
fl.close()
if __name__ == "__main__":
variants_fl = sys.argv[1]
scores_flname = sys.argv[2]
variants = read_variants(variants_fl)
scores = predict_scores(variants)
write_scores(scores, scores_flname)
| apache-2.0 |
Erotemic/ibeis | ibeis/algo/hots/toy_nan_rf.py | 1 | 12134 | import numpy as np
import utool as ut
def get_toydata(rng):
if ut.get_argflag('--toy2'):
X_true, X, y = toydata2(rng)
else:
X_true, X, y = toydata1(rng)
return X_true, X, y
def toydata2(rng):
from sklearn.datasets import samples_generator
n_samples = 1000
n_features = 2
n_classes = 2
n_informative = 2
n_clusters_per_class = int((2 ** n_informative) // n_classes)
hypercube = False
samplekw = dict(
flip_y=0.00,
class_sep=1.0,
shift=[-10, 10],
scale=1.0,
n_redundant=0,
n_repeated=0,
hypercube=hypercube, n_samples=n_samples, n_informative=n_informative,
n_classes=n_classes, n_clusters_per_class=n_clusters_per_class,
weights=None, shuffle=True, n_features=n_features, random_state=rng)
X_true, y = samples_generator.make_classification(**samplekw)
with_extra = ut.get_argflag('--extra')
# make very informative nan dimension
if with_extra:
n_informative_nan = 100
# extra_x = (rng.randn(n_informative_nan, 2) / 2 + [[12, -8]])
extra_x = (rng.randn(n_informative_nan, 2) / 2 + [[10, -12]])
X_true = np.vstack((X_true, extra_x))
y = np.append(y, [0] * n_informative_nan)
# Randomly drop datapoints
X = X_true.copy()
nanrate = ut.get_argval('--nanrate', default=.01)
if nanrate:
# TODO:
# * informative nan
# * random nan
# * random nan + informative nan
X.ravel()[rng.rand(X.size) < nanrate] = np.nan
if with_extra:
if True:
X.T[1][-n_informative_nan:] = np.nan
else:
X.T[0][-n_informative_nan:-n_informative_nan // 2] = np.nan
X.T[1][-n_informative_nan // 2:] = np.nan
return X_true, X, y
def toydata1(rng):
"""
**Description of Plot**
You'll notice that there are 4 plots. This is necessary to visualize a grid
with nans. Each plot shows points in the 2-dimensional grid with corners at
(0, 0) and (40, 40). The top left plot has these coordinates labeled. The
other 3 plots correspond to the top left grid, but in these plots at least
one of the dimensions has been "nanned". In the top right the x-dimension
is "nanned". In the bottom left the y-dimension is "nanned", and in the
bottom right both dimensions are "nanned". Even though all plots are drawn
    as a 2d surface, only the top left plot is truly a surface with 2 degrees of
freedom. The top right and bottom left plots are really lines with 1 degree
of freedom, and the bottom right plot is actually just a single point with
0 degrees of freedom.
In this example I create 10 Gaussian blobs where the first 9 have their
means laid out in a 3x3 grid and the last one has its mean in the center,
but I gave it a high standard deviation. I'll refer to the high std cluster
as 9, and label the other clusters at the grid means (to agree with the
demo code) like this:
```
6 7 8
3 4 5
0 1 2
```
Looking at the top left plot you can see clusters 0, 1, 2, 4, 6, and 8. The
    reason the other clusters do not appear in this grid is that I've set at
    least one of their dimensions to nan. Specifically, cluster 3 had its y
    dimension set to nan; clusters 5 and 7 had their x dimension set to nan; and
cluster 9 had both x and y dimensions set to nan.
For clusters 3, 5, and 7, I plot "nanned" points as lines along the nanned
dimension to show that only the non-nan dimensions can be used to
distinguish these points. I also plot the original position before I
"nanned" it for visualization purposes, but the learning algorithm never
sees this. For cluster 9, I only plot the original positions because all of
this data collapses to a single point [nan, nan].
Red points are of class 0, and blue points are of class 1. Points in each
plot represent the training data. The colored background of each plot
represents the classification surface.
"""
from sklearn.datasets import samples_generator
import functools
step = 20
n_samples = 100
blob = functools.partial(samples_generator.make_blobs, n_samples=n_samples,
random_state=rng)
Xy_blobs = [
(0, blob(centers=[[0 * step, 0 * step]])[0]),
(1, blob(centers=[[1 * step, 0 * step]])[0]),
(0, blob(centers=[[2 * step, 0 * step]])[0]),
(1, blob(centers=[[0 * step, 1 * step]])[0]),
(0, blob(centers=[[1 * step, 1 * step]])[0]),
(0, blob(centers=[[2 * step, 1 * step]])[0]),
(0, blob(centers=[[0 * step, 2 * step]])[0]),
(1, blob(centers=[[1 * step, 2 * step]])[0]),
(0, blob(centers=[[2 * step, 2 * step]])[0]),
(1, blob(centers=[[1 * step, 1 * step]], cluster_std=5)[0]),
]
X_blobs = [Xy[1] for Xy in Xy_blobs]
X_true = np.vstack(X_blobs)
y_blobs = [np.full(len(X), y_, dtype=np.int) for y_, X in Xy_blobs]
# nanify some values
if True:
X_blobs[3][:, 1] = np.nan
X_blobs[7][:, 0] = np.nan
X_blobs[5][:, 0] = np.nan
X_blobs[-1][:, :] = np.nan
X = np.vstack(X_blobs)
y = np.hstack(y_blobs)
return X_true, X, y
def show_nan_decision_function_2d(X, y, X_true, clf):
import numpy as np
print('Drawing')
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
plot_step = 1.0
x_min, x_max = X_true[:, 0].min() - 1, X_true[:, 0].max() + 1
y_min, y_max = X_true[:, 1].min() - 1, X_true[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
yynan = np.full(yy.shape, fill_value=np.nan)
xxnan = np.full(yy.shape, fill_value=np.nan)
# Get prediction surface in the non-nan-zone
Z_nonnan = clf.predict_proba(
np.c_[xx.ravel(), yy.ravel()]).T[1].reshape(xx.shape)
# Get prediction surface in the xnan-zone
Z_xnan = clf.predict_proba(
np.c_[xxnan.ravel(), yy.ravel()]).T[1].reshape(xx.shape)
# Get prediction surface in the ynan-zone
Z_ynan = clf.predict_proba(
np.c_[xx.ravel(), yynan.ravel()]).T[1].reshape(xx.shape)
# Get prediction surface for all-nan-zone
Z_fullnan = clf.predict_proba(
np.c_[xxnan.ravel(), yynan.ravel()]).T[1].reshape(xx.shape)
is_nonnan = np.logical_and(~np.isnan(X.T[0]), ~np.isnan(X.T[1]))
is_xnan = np.logical_and(np.isnan(X.T[0]), ~np.isnan(X.T[1]))
is_ynan = np.logical_and(~np.isnan(X.T[0]), np.isnan(X.T[1]))
is_fullnan = np.logical_and(np.isnan(X.T[0]), np.isnan(X.T[1]))
# Draw surfaces and support points in different axes
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
gs = gridspec.GridSpec(17, 17)
pnum1 = (gs[0:8, 0:8],)
pnum2 = (gs[0:8, 8:16],)
pnum3 = (gs[9:17, 0:8],)
pnum4 = (gs[9:17, 8:16],)
fig = plt.figure()
cmap = plt.cm.RdYlBu
norm = plt.Normalize(vmin=0, vmax=1)
sm = plt.cm.ScalarMappable(cmap=cmap)
sm.set_array(np.linspace(0, 1))
color0 = cmap(0)
print('color0 = %r' % (color0,))
color1 = cmap(1.0)
print('color1 = %r' % (color1,))
def draw_line_segments(pts1, pts2, ax=None, **kwargs):
import matplotlib as mpl
if ax is None:
ax = plt.gca()
assert len(pts1) == len(pts2), 'unaligned'
segments = [(xy1, xy2) for xy1, xy2 in zip(pts1, pts2)]
linewidth = kwargs.pop('lw', kwargs.pop('linewidth', 1.0))
alpha = kwargs.pop('alpha', 1.0)
line_group = mpl.collections.LineCollection(segments, linewidth,
alpha=alpha, **kwargs)
ax.add_collection(line_group)
def draw_single_nan_lines(X_true, y, flags, nan_dim):
if not np.any(flags):
return
nandim_min = np.nanmin(X_true.T[nan_dim])
nandim_max = np.nanmax(X_true.T[nan_dim])
num_dim = 1 - nan_dim # 2d only
numdim_pts = X[flags].T[num_dim]
pts1 = np.empty((flags.sum(), 2))
pts2 = np.empty((flags.sum(), 2))
pts1[:, nan_dim] = nandim_min
pts2[:, nan_dim] = nandim_max
pts1[:, num_dim] = numdim_pts
pts2[:, num_dim] = numdim_pts
y_ = y[flags]
draw_line_segments(pts1[y_ == 0], pts2[y_ == 0], color=color0, linestyle='-', alpha=1.0)
draw_line_segments(pts1[y_ == 1], pts2[y_ == 1], color=color1, linestyle='-', alpha=1.0)
def draw_train_points(X_true, y, flags):
plt.plot(X_true[flags].T[0][y[flags] == 0], X_true[flags].T[1][y[flags] == 0], 'o', color=color0, markeredgecolor='w')
plt.plot(X_true[flags].T[0][y[flags] == 1], X_true[flags].T[1][y[flags] == 1], 'o', color=color1, markeredgecolor='w')
def _contour(Z):
plt.contourf(xx, yy, Z, cmap=cmap, norm=norm, alpha=1.0)
fig.add_subplot(*pnum1)
_contour(Z_nonnan)
flags = is_nonnan
draw_train_points(X_true, y, flags)
plt.title('non-nan decision surface')
plt.gca().set_aspect('equal')
fig.add_subplot(*pnum2)
_contour(Z_xnan)
flags = is_xnan
draw_train_points(X_true, y, flags)
draw_single_nan_lines(X_true, y, flags, 0)
plt.gca().set_xticks([])
plt.gca().set_xlabel('nan')
plt.title('x-nan decision surface')
plt.gca().set_aspect('equal')
fig.add_subplot(*pnum3)
_contour(Z_ynan)
flags = is_ynan
draw_train_points(X_true, y, flags)
# make nan-lines
draw_single_nan_lines(X_true, y, flags, 1)
plt.title('y-nan decision surface')
plt.gca().set_aspect('equal')
plt.gca().set_yticks([])
plt.gca().set_ylabel('nan')
fig.add_subplot(*pnum4)
_contour(Z_fullnan)
flags = is_fullnan
draw_train_points(X_true, y, flags)
plt.title('full-nan decision surface')
plt.gca().set_aspect('equal')
plt.gca().set_xticks([])
plt.gca().set_yticks([])
plt.gca().set_xlabel('nan')
plt.gca().set_ylabel('nan')
plt.gcf().suptitle('RandomForestClassifier With NaN decision criteria')
gs = gridspec.GridSpec(1, 16)
subspec = gs[:, -1:]
cax = plt.subplot(subspec)
plt.colorbar(sm, cax)
cax.set_ylabel('probability class 1')
new_subplotpars = fig.subplotpars.__dict__.copy()
del new_subplotpars['validate']
new_subplotpars.update(left=.001, right=.9, top=.9, bottom=.05, hspace=1.0, wspace=1.0)
plt.subplots_adjust(**new_subplotpars)
def main():
r"""
SeeAlso:
python -m sklearn.ensemble.tests.test_forest test_multioutput
CommandLine:
python -m ibeis toy_classify_nans
python -m ibeis toy_classify_nans --toy1 --save "rf_nan_toy1.jpg" --figsize=10,10
python -m ibeis toy_classify_nans --toy2 --save "rf_nan_toy2.jpg" --figsize=10,10
python -m ibeis toy_classify_nans --toy2 --save "rf_nan_toy3.jpg" --figsize=10,10 --extra
python -m ibeis toy_classify_nans --toy2 --save "rf_nan_toy4.jpg" --figsize=10,10 --extra --nanrate=0
python -m ibeis toy_classify_nans --toy2 --save "rf_nan_toy5.jpg" --figsize=10,10 --nanrate=0
Example:
>>> # DISABLE_DOCTEST
>>> result = toy_classify_nans()
"""
from sklearn.ensemble import RandomForestClassifier
rng = np.random.RandomState(42)
print('Creating test data')
X_true, X, y = get_toydata(rng)
assert len(X) == len(y)
print('Fitting RF on %d points' % (len(X),))
# Train uncalibrated random forest classifier on train data
clf = RandomForestClassifier(n_estimators=64, random_state=42,
criterion='gini',
missing_values=np.nan, bootstrap=False)
# import pprint
# pprint.pprint(clf.__dict__)
clf.fit(X, y)
# pprint.pprint(clf.__dict__)
show_nan_decision_function_2d(X, y, X_true, clf)
if __name__ == '__main__':
r"""
CommandLine:
python -m ibeis.algo.hots.toy_nan_rf --show
"""
main()
import matplotlib.pyplot as plt
plt.show()
| apache-2.0 |
GuessWhoSamFoo/pandas | pandas/tests/io/formats/test_to_excel.py | 3 | 10955 | """Tests formatting as writer-agnostic ExcelCells
ExcelFormatter is tested implicitly in pandas/tests/io/test_excel.py
"""
import pytest
import pandas.util.testing as tm
from pandas.io.formats.css import CSSWarning
from pandas.io.formats.excel import CSSToExcelConverter
@pytest.mark.parametrize('css,expected', [
# FONT
# - name
('font-family: foo,bar', {'font': {'name': 'foo'}}),
('font-family: "foo bar",baz', {'font': {'name': 'foo bar'}}),
('font-family: foo,\nbar', {'font': {'name': 'foo'}}),
('font-family: foo, bar, baz', {'font': {'name': 'foo'}}),
('font-family: bar, foo', {'font': {'name': 'bar'}}),
('font-family: \'foo bar\', baz', {'font': {'name': 'foo bar'}}),
('font-family: \'foo \\\'bar\', baz', {'font': {'name': 'foo \'bar'}}),
('font-family: "foo \\"bar", baz', {'font': {'name': 'foo "bar'}}),
('font-family: "foo ,bar", baz', {'font': {'name': 'foo ,bar'}}),
# - family
('font-family: serif', {'font': {'name': 'serif', 'family': 1}}),
('font-family: Serif', {'font': {'name': 'serif', 'family': 1}}),
('font-family: roman, serif', {'font': {'name': 'roman', 'family': 1}}),
('font-family: roman, sans-serif', {'font': {'name': 'roman',
'family': 2}}),
('font-family: roman, sans serif', {'font': {'name': 'roman'}}),
('font-family: roman, sansserif', {'font': {'name': 'roman'}}),
('font-family: roman, cursive', {'font': {'name': 'roman', 'family': 4}}),
('font-family: roman, fantasy', {'font': {'name': 'roman', 'family': 5}}),
# - size
('font-size: 1em', {'font': {'size': 12}}),
('font-size: xx-small', {'font': {'size': 6}}),
('font-size: x-small', {'font': {'size': 7.5}}),
('font-size: small', {'font': {'size': 9.6}}),
('font-size: medium', {'font': {'size': 12}}),
('font-size: large', {'font': {'size': 13.5}}),
('font-size: x-large', {'font': {'size': 18}}),
('font-size: xx-large', {'font': {'size': 24}}),
('font-size: 50%', {'font': {'size': 6}}),
# - bold
('font-weight: 100', {'font': {'bold': False}}),
('font-weight: 200', {'font': {'bold': False}}),
('font-weight: 300', {'font': {'bold': False}}),
('font-weight: 400', {'font': {'bold': False}}),
('font-weight: normal', {'font': {'bold': False}}),
('font-weight: lighter', {'font': {'bold': False}}),
('font-weight: bold', {'font': {'bold': True}}),
('font-weight: bolder', {'font': {'bold': True}}),
('font-weight: 700', {'font': {'bold': True}}),
('font-weight: 800', {'font': {'bold': True}}),
('font-weight: 900', {'font': {'bold': True}}),
# - italic
('font-style: italic', {'font': {'italic': True}}),
('font-style: oblique', {'font': {'italic': True}}),
# - underline
('text-decoration: underline',
{'font': {'underline': 'single'}}),
('text-decoration: overline',
{}),
('text-decoration: none',
{}),
# - strike
('text-decoration: line-through',
{'font': {'strike': True}}),
('text-decoration: underline line-through',
{'font': {'strike': True, 'underline': 'single'}}),
('text-decoration: underline; text-decoration: line-through',
{'font': {'strike': True}}),
# - color
('color: red', {'font': {'color': 'FF0000'}}),
('color: #ff0000', {'font': {'color': 'FF0000'}}),
('color: #f0a', {'font': {'color': 'FF00AA'}}),
# - shadow
('text-shadow: none', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px #CCC', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px #999', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px', {'font': {'shadow': False}}),
('text-shadow: 2px -0em 0px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -2em 0px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -0em 2px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -0em 2px', {'font': {'shadow': True}}),
('text-shadow: 0px -2em', {'font': {'shadow': True}}),
# FILL
# - color, fillType
('background-color: red', {'fill': {'fgColor': 'FF0000',
'patternType': 'solid'}}),
('background-color: #ff0000', {'fill': {'fgColor': 'FF0000',
'patternType': 'solid'}}),
('background-color: #f0a', {'fill': {'fgColor': 'FF00AA',
'patternType': 'solid'}}),
# BORDER
# - style
('border-style: solid',
{'border': {'top': {'style': 'medium'},
'bottom': {'style': 'medium'},
'left': {'style': 'medium'},
'right': {'style': 'medium'}}}),
('border-style: solid; border-width: thin',
{'border': {'top': {'style': 'thin'},
'bottom': {'style': 'thin'},
'left': {'style': 'thin'},
'right': {'style': 'thin'}}}),
('border-top-style: solid; border-top-width: thin',
{'border': {'top': {'style': 'thin'}}}),
('border-top-style: solid; border-top-width: 1pt',
{'border': {'top': {'style': 'thin'}}}),
('border-top-style: solid',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: medium',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: 2pt',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: thick',
{'border': {'top': {'style': 'thick'}}}),
('border-top-style: solid; border-top-width: 4pt',
{'border': {'top': {'style': 'thick'}}}),
('border-top-style: dotted',
{'border': {'top': {'style': 'mediumDashDotDot'}}}),
('border-top-style: dotted; border-top-width: thin',
{'border': {'top': {'style': 'dotted'}}}),
('border-top-style: dashed',
{'border': {'top': {'style': 'mediumDashed'}}}),
('border-top-style: dashed; border-top-width: thin',
{'border': {'top': {'style': 'dashed'}}}),
('border-top-style: double',
{'border': {'top': {'style': 'double'}}}),
# - color
('border-style: solid; border-color: #0000ff',
{'border': {'top': {'style': 'medium', 'color': '0000FF'},
'right': {'style': 'medium', 'color': '0000FF'},
'bottom': {'style': 'medium', 'color': '0000FF'},
'left': {'style': 'medium', 'color': '0000FF'}}}),
('border-top-style: double; border-top-color: blue',
{'border': {'top': {'style': 'double', 'color': '0000FF'}}}),
('border-top-style: solid; border-top-color: #06c',
{'border': {'top': {'style': 'medium', 'color': '0066CC'}}}),
# ALIGNMENT
# - horizontal
('text-align: center',
{'alignment': {'horizontal': 'center'}}),
('text-align: left',
{'alignment': {'horizontal': 'left'}}),
('text-align: right',
{'alignment': {'horizontal': 'right'}}),
('text-align: justify',
{'alignment': {'horizontal': 'justify'}}),
# - vertical
('vertical-align: top',
{'alignment': {'vertical': 'top'}}),
('vertical-align: text-top',
{'alignment': {'vertical': 'top'}}),
('vertical-align: middle',
{'alignment': {'vertical': 'center'}}),
('vertical-align: bottom',
{'alignment': {'vertical': 'bottom'}}),
('vertical-align: text-bottom',
{'alignment': {'vertical': 'bottom'}}),
# - wrap_text
('white-space: nowrap',
{'alignment': {'wrap_text': False}}),
('white-space: pre',
{'alignment': {'wrap_text': False}}),
('white-space: pre-line',
{'alignment': {'wrap_text': False}}),
('white-space: normal',
{'alignment': {'wrap_text': True}}),
# NUMBER FORMAT
('number-format: 0%',
{'number_format': {'format_code': '0%'}}),
])
def test_css_to_excel(css, expected):
convert = CSSToExcelConverter()
assert expected == convert(css)
def test_css_to_excel_multiple():
convert = CSSToExcelConverter()
actual = convert('''
font-weight: bold;
text-decoration: underline;
color: red;
border-width: thin;
text-align: center;
vertical-align: top;
unused: something;
''')
assert {"font": {"bold": True, "underline": "single", "color": "FF0000"},
"border": {"top": {"style": "thin"},
"right": {"style": "thin"},
"bottom": {"style": "thin"},
"left": {"style": "thin"}},
"alignment": {"horizontal": "center",
"vertical": "top"}} == actual
@pytest.mark.parametrize('css,inherited,expected', [
('font-weight: bold', '',
{'font': {'bold': True}}),
('', 'font-weight: bold',
{'font': {'bold': True}}),
('font-weight: bold', 'font-style: italic',
{'font': {'bold': True, 'italic': True}}),
('font-style: normal', 'font-style: italic',
{'font': {'italic': False}}),
('font-style: inherit', '', {}),
('font-style: normal; font-style: inherit', 'font-style: italic',
{'font': {'italic': True}}),
])
def test_css_to_excel_inherited(css, inherited, expected):
convert = CSSToExcelConverter(inherited)
assert expected == convert(css)
@pytest.mark.parametrize("input_color,output_color", (
[(name, rgb) for name, rgb in CSSToExcelConverter.NAMED_COLORS.items()] +
[("#" + rgb, rgb) for rgb in CSSToExcelConverter.NAMED_COLORS.values()] +
[("#F0F", "FF00FF"), ("#ABC", "AABBCC")])
)
def test_css_to_excel_good_colors(input_color, output_color):
# see gh-18392
css = ("border-top-color: {color}; "
"border-right-color: {color}; "
"border-bottom-color: {color}; "
"border-left-color: {color}; "
"background-color: {color}; "
"color: {color}").format(color=input_color)
expected = dict()
expected["fill"] = {
"patternType": "solid",
"fgColor": output_color
}
expected["font"] = {
"color": output_color
}
expected["border"] = {
k: {
"color": output_color,
} for k in ("top", "right", "bottom", "left")
}
with tm.assert_produces_warning(None):
convert = CSSToExcelConverter()
assert expected == convert(css)
@pytest.mark.parametrize("input_color", [None, "not-a-color"])
def test_css_to_excel_bad_colors(input_color):
# see gh-18392
css = ("border-top-color: {color}; "
"border-right-color: {color}; "
"border-bottom-color: {color}; "
"border-left-color: {color}; "
"background-color: {color}; "
"color: {color}").format(color=input_color)
expected = dict()
if input_color is not None:
expected["fill"] = {
"patternType": "solid"
}
with tm.assert_produces_warning(CSSWarning):
convert = CSSToExcelConverter()
assert expected == convert(css)
| bsd-3-clause |
gracecox/MagPySV | setup.py | 1 | 1993 | """minimal package setup"""
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="magpysv",
version="2.1",
author="Grace Cox",
author_email="[email protected]",
license="MIT",
url="https://github.com/gracecox/MagPySV",
description="Download, process and denoise geomagnetic observatory data",
long_description=read('readme.rst'),
packages=find_packages(),
include_package_data=True,
package_data={"magpysv": ["baseline_records"]},
classifiers=["Programming Language :: Python :: 3"],
zip_safe=False,
install_requires=['aacgmv2>=2.5.2',
'cartopy>=0.17.0',
'chaosmagpy',
'datetime',
'glob2',
'jupyter>=1.0.0',
'matplotlib>=2.0.0',
'notebook>=4.3.1',
'numpy>=1.12.0',
'pandas>=0.19.2',
'requests>=2.12.4',
'scikit-learn>=0.18.1',
'scipy>=0.18.1',
'gmdata_webinterface'],
extras_require={'develop': ['jupyter>=1.0.0',
'matplotlib>=2.0.0',
'notebook>=4.3.1',
'numpy>=1.12.0',
'pandas>=0.19.2',
'requests>=2.12.4',
'scikit-learn>=0.18.1',
'scipy>=0.18.1',
'prospector>=0.12.7',
'pytest>=3.0.6',
'pytest-cov<2.6',
'Sphinx>=1.5.1',
'sphinx-rtd-theme>=0.1.9',
'gmdata_webinterface']}
)
| mit |
ryfeus/lambda-packs | Tensorflow_Pandas_Numpy/source3.6/pandas/compat/numpy/function.py | 10 | 13077 | """
For compatibility with numpy libraries, pandas functions or
methods have to accept '*args' and '**kwargs' parameters to
accommodate numpy arguments that are not actually used or
respected in the pandas implementation.
To ensure that users do not abuse these parameters, validation
is performed in 'validators.py' to make sure that any extra
parameters passed correspond ONLY to those in the numpy signature.
Part of that validation includes whether or not the user attempted
to pass in non-default values for these extraneous parameters. As we
want to discourage users from relying on these parameters when calling
the pandas implementation, we want them only to pass in the default values
for these parameters.
This module provides a set of commonly used default arguments for functions
and methods that are spread throughout the codebase. This module will make it
easier to adjust to future upstream changes in the analogous numpy signatures.
"""
from numpy import ndarray
from pandas.util._validators import (validate_args, validate_kwargs,
validate_args_and_kwargs)
from pandas.errors import UnsupportedFunctionCall
from pandas.core.dtypes.common import is_integer, is_bool
from pandas.compat import OrderedDict
class CompatValidator(object):
def __init__(self, defaults, fname=None, method=None,
max_fname_arg_count=None):
self.fname = fname
self.method = method
self.defaults = defaults
self.max_fname_arg_count = max_fname_arg_count
def __call__(self, args, kwargs, fname=None,
max_fname_arg_count=None, method=None):
if args or kwargs:
fname = self.fname if fname is None else fname
max_fname_arg_count = (self.max_fname_arg_count if
max_fname_arg_count is None
else max_fname_arg_count)
method = self.method if method is None else method
if method == 'args':
validate_args(fname, args, max_fname_arg_count, self.defaults)
elif method == 'kwargs':
validate_kwargs(fname, kwargs, self.defaults)
elif method == 'both':
validate_args_and_kwargs(fname, args, kwargs,
max_fname_arg_count,
self.defaults)
else:
raise ValueError("invalid validation method "
"'{method}'".format(method=method))
ARGMINMAX_DEFAULTS = dict(out=None)
validate_argmin = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmin',
method='both', max_fname_arg_count=1)
validate_argmax = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmax',
method='both', max_fname_arg_count=1)
def process_skipna(skipna, args):
if isinstance(skipna, ndarray) or skipna is None:
args = (skipna,) + args
skipna = True
return skipna, args
def validate_argmin_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmin' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmin(args, kwargs)
return skipna
def validate_argmax_with_skipna(skipna, args, kwargs):
"""
If 'Series.argmax' is called via the 'numpy' library,
the third parameter in its signature is 'out', which
takes either an ndarray or 'None', so check if the
'skipna' parameter is either an instance of ndarray or
is None, since 'skipna' itself should be a boolean
"""
skipna, args = process_skipna(skipna, args)
validate_argmax(args, kwargs)
return skipna
ARGSORT_DEFAULTS = OrderedDict()
ARGSORT_DEFAULTS['axis'] = -1
ARGSORT_DEFAULTS['kind'] = 'quicksort'
ARGSORT_DEFAULTS['order'] = None
validate_argsort = CompatValidator(ARGSORT_DEFAULTS, fname='argsort',
max_fname_arg_count=0, method='both')
# two different signatures of argsort, this second validation
# for when the `kind` param is supported
ARGSORT_DEFAULTS_KIND = OrderedDict()
ARGSORT_DEFAULTS_KIND['axis'] = -1
ARGSORT_DEFAULTS_KIND['order'] = None
validate_argsort_kind = CompatValidator(ARGSORT_DEFAULTS_KIND, fname='argsort',
max_fname_arg_count=0, method='both')
def validate_argsort_with_ascending(ascending, args, kwargs):
"""
If 'Categorical.argsort' is called via the 'numpy' library, the
first parameter in its signature is 'axis', which takes either
an integer or 'None', so check if the 'ascending' parameter has
either integer type or is None, since 'ascending' itself should
be a boolean
"""
if is_integer(ascending) or ascending is None:
args = (ascending,) + args
ascending = True
validate_argsort_kind(args, kwargs, max_fname_arg_count=3)
return ascending
CLIP_DEFAULTS = dict(out=None)
validate_clip = CompatValidator(CLIP_DEFAULTS, fname='clip',
method='both', max_fname_arg_count=3)
def validate_clip_with_axis(axis, args, kwargs):
"""
If 'NDFrame.clip' is called via the numpy library, the third
    parameter in its signature is 'out', which can take an ndarray,
so check if the 'axis' parameter is an instance of ndarray, since
'axis' itself should either be an integer or None
"""
if isinstance(axis, ndarray):
args = (axis,) + args
axis = None
validate_clip(args, kwargs)
return axis
COMPRESS_DEFAULTS = OrderedDict()
COMPRESS_DEFAULTS['axis'] = None
COMPRESS_DEFAULTS['out'] = None
validate_compress = CompatValidator(COMPRESS_DEFAULTS, fname='compress',
method='both', max_fname_arg_count=1)
CUM_FUNC_DEFAULTS = OrderedDict()
CUM_FUNC_DEFAULTS['dtype'] = None
CUM_FUNC_DEFAULTS['out'] = None
validate_cum_func = CompatValidator(CUM_FUNC_DEFAULTS, method='both',
max_fname_arg_count=1)
validate_cumsum = CompatValidator(CUM_FUNC_DEFAULTS, fname='cumsum',
method='both', max_fname_arg_count=1)
def validate_cum_func_with_skipna(skipna, args, kwargs, name):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'dtype', which takes either a
'numpy' dtype or 'None', so check if the 'skipna' parameter is
a boolean or not
"""
if not is_bool(skipna):
args = (skipna,) + args
skipna = True
validate_cum_func(args, kwargs, fname=name)
return skipna
ALLANY_DEFAULTS = OrderedDict()
ALLANY_DEFAULTS['dtype'] = None
ALLANY_DEFAULTS['out'] = None
validate_all = CompatValidator(ALLANY_DEFAULTS, fname='all',
method='both', max_fname_arg_count=1)
validate_any = CompatValidator(ALLANY_DEFAULTS, fname='any',
method='both', max_fname_arg_count=1)
LOGICAL_FUNC_DEFAULTS = dict(out=None)
validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method='kwargs')
MINMAX_DEFAULTS = dict(out=None)
validate_min = CompatValidator(MINMAX_DEFAULTS, fname='min',
method='both', max_fname_arg_count=1)
validate_max = CompatValidator(MINMAX_DEFAULTS, fname='max',
method='both', max_fname_arg_count=1)
RESHAPE_DEFAULTS = dict(order='C')
validate_reshape = CompatValidator(RESHAPE_DEFAULTS, fname='reshape',
method='both', max_fname_arg_count=1)
REPEAT_DEFAULTS = dict(axis=None)
validate_repeat = CompatValidator(REPEAT_DEFAULTS, fname='repeat',
method='both', max_fname_arg_count=1)
ROUND_DEFAULTS = dict(out=None)
validate_round = CompatValidator(ROUND_DEFAULTS, fname='round',
method='both', max_fname_arg_count=1)
SORT_DEFAULTS = OrderedDict()
SORT_DEFAULTS['axis'] = -1
SORT_DEFAULTS['kind'] = 'quicksort'
SORT_DEFAULTS['order'] = None
validate_sort = CompatValidator(SORT_DEFAULTS, fname='sort',
method='kwargs')
STAT_FUNC_DEFAULTS = OrderedDict()
STAT_FUNC_DEFAULTS['dtype'] = None
STAT_FUNC_DEFAULTS['out'] = None
validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS,
method='kwargs')
validate_sum = CompatValidator(STAT_FUNC_DEFAULTS, fname='sort',
method='both', max_fname_arg_count=1)
validate_mean = CompatValidator(STAT_FUNC_DEFAULTS, fname='mean',
method='both', max_fname_arg_count=1)
STAT_DDOF_FUNC_DEFAULTS = OrderedDict()
STAT_DDOF_FUNC_DEFAULTS['dtype'] = None
STAT_DDOF_FUNC_DEFAULTS['out'] = None
validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS,
method='kwargs')
TAKE_DEFAULTS = OrderedDict()
TAKE_DEFAULTS['out'] = None
TAKE_DEFAULTS['mode'] = 'raise'
validate_take = CompatValidator(TAKE_DEFAULTS, fname='take',
method='kwargs')
def validate_take_with_convert(convert, args, kwargs):
"""
If this function is called via the 'numpy' library, the third
parameter in its signature is 'axis', which takes either an
ndarray or 'None', so check if the 'convert' parameter is either
an instance of ndarray or is None
"""
if isinstance(convert, ndarray) or convert is None:
args = (convert,) + args
convert = True
validate_take(args, kwargs, max_fname_arg_count=3, method='both')
return convert
TRANSPOSE_DEFAULTS = dict(axes=None)
validate_transpose = CompatValidator(TRANSPOSE_DEFAULTS, fname='transpose',
method='both', max_fname_arg_count=0)
def validate_transpose_for_generic(inst, kwargs):
try:
validate_transpose(tuple(), kwargs)
except ValueError as e:
klass = type(inst).__name__
msg = str(e)
        # the Panel class actually relies on the 'axes' parameter if called
# via the 'numpy' library, so let's make sure the error is specific
# about saying that the parameter is not supported for particular
# implementations of 'transpose'
if "the 'axes' parameter is not supported" in msg:
msg += " for {klass} instances".format(klass=klass)
raise ValueError(msg)
def validate_window_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .{func}() directly instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_rolling_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .rolling(...).{func}() instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_expanding_func(name, args, kwargs):
numpy_args = ('axis', 'dtype', 'out')
msg = ("numpy operations are not "
"valid with window objects. "
"Use .expanding(...).{func}() instead ".format(func=name))
if len(args) > 0:
raise UnsupportedFunctionCall(msg)
for arg in numpy_args:
if arg in kwargs:
raise UnsupportedFunctionCall(msg)
def validate_groupby_func(name, args, kwargs, allowed=None):
"""
'args' and 'kwargs' should be empty, except for allowed
kwargs because all of
their necessary parameters are explicitly listed in
the function signature
"""
if allowed is None:
allowed = []
kwargs = set(kwargs) - set(allowed)
if len(args) + len(kwargs) > 0:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with groupby. Use .groupby(...)."
"{func}() instead".format(func=name)))
RESAMPLER_NUMPY_OPS = ('min', 'max', 'sum', 'prod',
'mean', 'std', 'var')
def validate_resampler_func(method, args, kwargs):
"""
'args' and 'kwargs' should be empty because all of
their necessary parameters are explicitly listed in
the function signature
"""
if len(args) + len(kwargs) > 0:
if method in RESAMPLER_NUMPY_OPS:
raise UnsupportedFunctionCall((
"numpy operations are not valid "
"with resample. Use .resample(...)."
"{func}() instead".format(func=method)))
else:
raise TypeError("too many arguments passed in")
| mit |
joshamilton/Hamilton_acI_2017 | code/mapping/02coverage.py | 1 | 12720 | #%%#############################################################################
# coverage.py
# Copyright (c) 2017, Joshua J Hamilton and Katherine D McMahon
# Affiliation: Department of Bacteriology
# University of Wisconsin-Madison, Madison, Wisconsin, USA
# URL: http://http://mcmahonlab.wisc.edu/
# All rights reserved.
################################################################################
# This function plots coverage of each contig and genome
################################################################################
#%%#############################################################################
### Import packages
################################################################################
from Bio import SeqIO
import math
import os
import pandas as pd
import subprocess
#%%#############################################################################
### Static folder structure
################################################################################
# Define fixed input and output files
concatFolder = '../../data/refGenomes/concat'
genomeFolder = '../../data/refGenomes/fnaKBase'
gffFolder = '../../data/refGenomes/gff'
sampleFolder = '../../data/sequences'
mapFolder = '../../data/mapping'
bamFolder = '../../data/mapping/bamFiles'
coverageFolder = '../../data/mapping/coverage-pooled'
# Check that the new output directory exists and create if it doesn't
if not os.path.exists(mapFolder):
os.makedirs(mapFolder)
if not os.path.exists(bamFolder):
os.makedirs(bamFolder)
if not os.path.exists(coverageFolder):
os.makedirs(coverageFolder)
#%%#############################################################################
### Read in sample and genome lists.
################################################################################
sampleList = []
for sample in os.listdir(sampleFolder):
if sample.endswith('.fastq'):
sampleList.append(sample)
sampleList = [sample.replace('.fastq', '') for sample in sampleList]
genomeList = []
for genome in os.listdir(genomeFolder):
if genome.endswith('.fna'):
genomeList.append(genome)
genomeList = [genome.replace('.fna', '') for genome in genomeList]
concatList = []
for concat in os.listdir(concatFolder):
if concat.endswith('.fna'):
concatList.append(concat)
concatList = [concat.replace('.fna', '') for concat in concatList]
#%%#############################################################################
### Compute coverage of each individual genome in each sample
### Part 1: Convert SAM to sorted BAM
################################################################################
# Using samtools, compute the depth of each position along the chromosome
## First convert each SAM file to a sorted BAM file
for sample in sampleList:
print('Indexing sample '+str(sample))
for concat in concatList:
subprocess.call('samtools view -bS '+bamFolder+'/'+sample+'-'+concat+'.sam | samtools sort -o '+bamFolder+'/'+sample+'-'+concat+'.bam', shell=True)
#%%#############################################################################
### Compute coverage of each individual genome in each sample
### Part 2: Compute depth of each base
################################################################################
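# Note (assumed samtools behaviour): `samtools depth` writes one tab-separated
# record per covered position -- reference name, 1-based position, depth -- and
# omits zero-depth positions unless -a is given, so the .depth files below are
# sparse and are later read with a (contig, position) MultiIndex.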
## Compute depth for each sample
for sample in sampleList:
print('Depth calculations for sample '+str(sample))
for concat in concatList:
subprocess.call('samtools depth '+bamFolder+'/'+sample+'-'+concat+'.bam > '+coverageFolder+'/'+sample+'-'+concat+'.depth', shell=True)
#%%#############################################################################
### Compute coverage of each individual genome
### Part 3: Compute coverage of each contig and genome
################################################################################
# Using FASTA files (contigs in each genome), compute average coverage of each genome and contig
## Create a dataframe to store genome coverage pooled across all samples
coverageDF = pd.DataFrame(index = genomeList, columns=['% Covered', 'Coverage'])
for concat in concatList:
for genome in genomeList:
print('Contig coverage for genome '+str(genome))
# Create a dataframe to store contig depth, length, and coverage
# Create a dataframe to store the % covered - must track each base on each contig
contigList = []
coverContigList = []
coverPositionList = []
for curSeq in SeqIO.parse(genomeFolder+'/'+genome+'.fna', 'fasta'):
contigList.append(curSeq.id)
coverContigList = coverContigList + ([curSeq.id] * len(curSeq))
coverPositionList = coverPositionList + list(range(1,len(curSeq)+1))
contigDF = pd.DataFrame(0, index = contigList, columns=['Covered', 'Depth', 'Length', '% Covered', 'Coverage'])
coverDF = pd.DataFrame(0, index = pd.MultiIndex.from_tuples(list(zip(*[coverContigList, coverPositionList]))), columns=['Depth'])
for sample in sampleList:
# Make a depth file for the genome
subprocess.call('grep '+genome+' '+coverageFolder+'/'+sample+'-'+concat+'.depth > '+coverageFolder+'/'+sample+'-'+genome+'.depth', shell=True)
# Import the depth file (if it exists)
if os.stat(coverageFolder+'/'+sample+'-'+genome+'.depth').st_size > 0:
depthDF = pd.read_csv(coverageFolder+'/'+sample+'-'+genome+'.depth', index_col = [0,1], names = ['Depth'], sep='\t')
# Store depth of each base across samples
#coverDF['Depth'] = coverDF['Depth'] + depthDF['Depth']
coverDF = coverDF.add(depthDF, axis=1, fill_value=0)
# Compute the depth of each contig
for curIndex in depthDF.index.levels[0]:
contigDF.loc[curIndex, 'Depth'] = contigDF.loc[curIndex, 'Depth'] + depthDF.loc[curIndex].sum()[0]
# Store the length of each contig
for curSeq in SeqIO.parse(genomeFolder+'/'+genome+'.fna', 'fasta'):
contigDF.loc[curSeq.id, 'Length'] = len(curSeq)
# Compute the coverage of each contig across all samples
contigDF['Coverage'] = contigDF['Depth'] / contigDF['Length']
# Compute the % covered of each contig across all samples
for curIndex in coverDF.index.levels[0]:
# Subset the coverDF belonging to this contig
subsetCoverDF = coverDF.loc[curIndex]
# Subset the coverDF having nonzero depth
subsetCoverDF = subsetCoverDF.loc[subsetCoverDF['Depth']>0]
contigDF.loc[curIndex, 'Covered'] = len(subsetCoverDF)
# Check to see if contig is covered. Update coverage appropriately.
if len(subsetCoverDF) > 0:
contigDF.loc[curIndex, '% Covered'] = contigDF.loc[curIndex, 'Covered'] / contigDF.loc[curIndex, 'Length']
else:
contigDF.loc[curIndex, '% Covered'] = 0
contigDF.to_csv(coverageFolder+'/'+genome+'.contig.coverage')
# Update coverage and % covered for the entire genome
coverageDF.loc[genome, 'Coverage'] = float(contigDF['Depth'].sum()) / contigDF['Length'].sum()
coverageDF.loc[genome, '% Covered'] = float(contigDF['Covered'].sum()) / contigDF['Length'].sum()
coverageDF.to_csv(coverageFolder+'/coverage.csv')
#%%#############################################################################
### Compute coverage of each individual genome
### Part 4: Compute coverage of each individual gene in each genome
################################################################################
# Using GFF files (gene coordinates within each genome), compute average coverage of each gene
for concat in concatList:
for genome in genomeList:
print('Gene coverage for genome '+str(genome))
# Create a dataframe to store gene depth, length, and coverage
geneDF = pd.DataFrame(columns=['Contig', 'Start', 'Stop', 'Length', 'Covered', 'Depth', '% Covered', 'Coverage', 'Evenness'])
# Import the GFF file and use its values to populate the geneDF
with open(gffFolder+'/'+genome+'.gff', 'r') as gffFile:
next(gffFile)
for line in gffFile:
gffArray = line.split('\t')
contig = gffArray[0]
start = int(gffArray[3])
stop = int(gffArray[4])
locusArray = gffArray[8].split(';')
locus = locusArray[0].split('=')[1]
# Populate the geneDF dataframe using these values
geneDF.loc[locus, 'Contig'] = contig
geneDF.loc[locus, 'Start'] = start
geneDF.loc[locus, 'Stop'] = stop
geneDF.loc[locus, 'Length'] = stop - start + 1
# Create a dataframe to store contig depth at each position
contigList = []
coverContigList = []
coverPositionList = []
for curSeq in SeqIO.parse(genomeFolder+'/'+genome+'.fna', 'fasta'):
contigList.append(curSeq.id)
coverContigList = coverContigList + ([curSeq.id] * len(curSeq))
coverPositionList = coverPositionList + list(range(1,len(curSeq)+1))
coverDF = pd.DataFrame(0, index = pd.MultiIndex.from_tuples(list(zip(*[coverContigList, coverPositionList]))), columns=['Depth'])
for sample in sampleList:
# Make a depth file for the genome
subprocess.call('grep '+genome+' '+coverageFolder+'/'+sample+'-'+concat+'.depth > '+coverageFolder+'/'+sample+'-'+genome+'.depth', shell=True)
# Import the depth file (if it exists)
if os.stat(coverageFolder+'/'+sample+'-'+genome+'.depth').st_size > 0:
depthDF = pd.read_csv(coverageFolder+'/'+sample+'-'+genome+'.depth', index_col = [0,1], names = ['Depth'], sep='\t')
# Store depth of each base across samples
#coverDF['Depth'] = coverDF['Depth'] + depthDF['Depth']
coverDF = coverDF.add(depthDF, axis=1, fill_value=0)
# Compute the depth and coverage of each gene
for curIndex in geneDF.index:
contig = geneDF.loc[curIndex]['Contig']
start = geneDF.loc[curIndex]['Start']
stop = geneDF.loc[curIndex]['Stop']
# If the depth file exists:
if os.stat(coverageFolder+'/'+sample+'-'+genome+'.depth').st_size > 0:
# Subset the depthDF belonging to this contig and range
subsetCoverDF = coverDF.loc[(contig, start):(contig, stop)]
# Subset the depthDF having nonzero depth
subsetCoverDF = subsetCoverDF.loc[subsetCoverDF['Depth']>0]
# Update the geneDF with coverage and depth of each gene
geneDF.loc[curIndex, 'Covered'] = len(subsetCoverDF)
geneDF.loc[curIndex, 'Depth'] = subsetCoverDF.sum()[0]
# Compute the coverage of each gene
geneDF.loc[curIndex, '% Covered'] = float(geneDF.loc[curIndex, 'Covered']) / float(geneDF.loc[curIndex, 'Length'])
geneDF.loc[curIndex, 'Coverage'] = float(geneDF.loc[curIndex, 'Depth']) / float(geneDF.loc[curIndex, 'Length'])
# If the gene is covered...
                # Compute the evenness of coverage using Pielou's evenness
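                # (Pielou's evenness J' = H'/H'_max = -(sum_i p_i ln p_i) / ln S,
                # computed here over the frequency distribution of depth values
                # across the gene's positions; J' = 1 indicates perfectly even coverage.)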
if len(subsetCoverDF) > 0:
countList = subsetCoverDF['Depth'].value_counts().tolist()
# Account for unmapped loci
if (geneDF.loc[curIndex]['Stop'] - geneDF.loc[curIndex]['Start'] + 1) > sum(countList):
countList.append((geneDF.loc[curIndex]['Stop'] - geneDF.loc[curIndex]['Start'] + 1) - sum(countList))
freqDist = [float(x) / sum(countList) for x in countList]
Hprime = 0
for freq in freqDist:
Hprime = Hprime + freq*math.log(freq)
# If only one frequency, assign an evenness of 1
if len(freqDist) == 1:
geneDF.loc[curIndex, 'Evenness'] = 1
else:
Hprime_max = math.log(len(freqDist))
geneDF.loc[curIndex, 'Evenness'] = - Hprime / Hprime_max
# Simplify the dataframe and write to file
geneDF = geneDF.drop(['Contig', 'Start', 'Stop', 'Length', 'Covered', 'Depth'], 1)
geneDF.to_csv(coverageFolder+'/'+genome+'.gene.coverage')
| mit |
automl/paramsklearn | tests/components/data_preprocessing/test_balancing.py | 1 | 7836 | __author__ = 'feurerm'
import unittest
import numpy as np
import sklearn.metrics
from ParamSklearn.components.data_preprocessing.balancing import Balancing
from ParamSklearn.classification import ParamSklearnClassifier
from ParamSklearn.components.classification.adaboost import AdaboostClassifier
from ParamSklearn.components.classification.decision_tree import DecisionTree
from ParamSklearn.components.classification.extra_trees import ExtraTreesClassifier
from ParamSklearn.components.classification.gradient_boosting import GradientBoostingClassifier
from ParamSklearn.components.classification.random_forest import RandomForest
from ParamSklearn.components.classification.liblinear_svc import LibLinear_SVC
from ParamSklearn.components.classification.libsvm_svc import LibSVM_SVC
from ParamSklearn.components.classification.sgd import SGD
from ParamSklearn.components.feature_preprocessing\
.extra_trees_preproc_for_classification import ExtraTreesPreprocessor
from ParamSklearn.components.feature_preprocessing.liblinear_svc_preprocessor import LibLinear_Preprocessor
from ParamSklearn.components.feature_preprocessing.random_trees_embedding import RandomTreesEmbedding
from ParamSklearn.util import get_dataset
class BalancingComponentTest(unittest.TestCase):
def test_balancing_get_weights_treed_single_label(self):
Y = np.array([0] * 80 + [1] * 20)
balancing = Balancing(strategy='weighting')
init_params, fit_params = balancing.get_weights(
Y, 'adaboost', None, None, None)
self.assertTrue(np.allclose(fit_params['classifier:sample_weight'],
np.array([0.4] * 80 + [1.6] * 20)))
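        # Sanity check on the expected values (arithmetic only, not the class
        # internals): weighting each class by its normalised inverse frequency,
        # n_classes * (1/count_c) / sum_k(1/count_k), gives 2 * 0.0125 / 0.0625
        # = 0.4 for the 80-sample class and 2 * 0.05 / 0.0625 = 1.6 for the
        # 20-sample class.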
#init_params, fit_params = balancing.get_weights(
# Y, None, 'extra_trees_preproc_for_classification', None, None)
#self.assertTrue(np.allclose(fit_params['preprocessor:sample_weight'],
# np.array([0.4] * 80 + [1.6] * 20)))
def test_balancing_get_weights_treed_multilabel(self):
Y = np.array([[0, 0, 0]] * 100 + [[1, 0, 0]] * 100 + [[0, 1, 0]] * 100 +
[[1, 1, 0]] * 100 + [[0, 0, 1]] * 100 + [[1, 0, 1]] * 10)
balancing = Balancing(strategy='weighting')
init_params, fit_params = balancing.get_weights(
Y, 'adaboost', None, None, None)
self.assertTrue(np.allclose(fit_params['classifier:sample_weight'],
np.array([0.4] * 500 + [4.0] * 10)))
#init_params, fit_params = balancing.get_weights(
# Y, None, 'extra_trees_preproc_for_classification', None, None)
#self.assertTrue(np.allclose(fit_params['preprocessor:sample_weight'],
# np.array([0.4] * 500 + [4.0] * 10)))
def test_balancing_get_weights_svm_sgd(self):
Y = np.array([0] * 80 + [1] * 20)
balancing = Balancing(strategy='weighting')
init_params, fit_params = balancing.get_weights(
Y, 'libsvm_svc', None, None, None)
self.assertEqual(("classifier:class_weight", "auto"),
list(init_params.items())[0])
init_params, fit_params = balancing.get_weights(
Y, None, 'liblinear_svc_preprocessor', None, None)
self.assertEqual(("preprocessor:class_weight", "auto"),
list(init_params.items())[0])
def test_weighting_effect(self):
for name, clf, acc_no_weighting, acc_weighting in \
[('adaboost', AdaboostClassifier, 0.692, 0.719),
('decision_tree', DecisionTree, 0.712, 0.668),
('extra_trees', ExtraTreesClassifier, 0.901, 0.919),
('gradient_boosting', GradientBoostingClassifier, 0.879, 0.883),
('random_forest', RandomForest, 0.886, 0.885),
('libsvm_svc', LibSVM_SVC, 0.915, 0.937),
('liblinear_svc', LibLinear_SVC, 0.920, 0.923),
('sgd', SGD, 0.811, 0.902)]:
for strategy, acc in [('none', acc_no_weighting),
('weighting', acc_weighting)]:
# Fit
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'classifier': [name]})
default = cs.get_default_configuration()
default._values['balancing:strategy'] = strategy
classifier = ParamSklearnClassifier(default, random_state=1)
predictor = classifier.fit(X_train, Y_train)
predictions = predictor.predict(X_test)
self.assertAlmostEqual(acc,
sklearn.metrics.accuracy_score(predictions, Y_test),
places=3)
# pre_transform and fit_estimator
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'classifier': [name]})
default = cs.get_default_configuration()
default._values['balancing:strategy'] = strategy
classifier = ParamSklearnClassifier(default, random_state=1)
Xt, fit_params = classifier.pre_transform(X_train, Y_train)
classifier.fit_estimator(Xt, Y_train, fit_params=fit_params)
predictions = classifier.predict(X_test)
self.assertAlmostEqual(acc,
sklearn.metrics.accuracy_score(
predictions, Y_test),
places=3)
for name, pre, acc_no_weighting, acc_weighting in \
[('extra_trees_preproc_for_classification',
ExtraTreesPreprocessor, 0.892, 0.910),
('liblinear_svc_preprocessor', LibLinear_Preprocessor,
0.906, 0.909)]:
for strategy, acc in [('none', acc_no_weighting),
('weighting', acc_weighting)]:
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'classifier': ['sgd'], 'preprocessor': [name]})
default = cs.get_default_configuration()
default._values['balancing:strategy'] = strategy
classifier = ParamSklearnClassifier(default, random_state=1)
predictor = classifier.fit(X_train, Y_train)
predictions = predictor.predict(X_test)
self.assertAlmostEqual(acc,
sklearn.metrics.accuracy_score(
predictions, Y_test),
places=3)
# pre_transform and fit_estimator
X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
cs = ParamSklearnClassifier.get_hyperparameter_search_space(
include={'classifier': ['sgd'], 'preprocessor': [name]})
default = cs.get_default_configuration()
default._values['balancing:strategy'] = strategy
classifier = ParamSklearnClassifier(default, random_state=1)
Xt, fit_params = classifier.pre_transform(X_train, Y_train)
classifier.fit_estimator(Xt, Y_train, fit_params=fit_params)
predictions = classifier.predict(X_test)
self.assertAlmostEqual(acc,
sklearn.metrics.accuracy_score(
predictions, Y_test),
places=3) | bsd-3-clause |
q1ang/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 78 | 34552 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
    distances in D: D[k] = np.abs(X[ij[k, 0]] - X[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
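# Illustrative example (not from the original source): for X = [[0.], [1.], [3.]]
# the pairs (0, 1), (0, 2), (1, 2) give D = [[1.], [3.], [2.]] and
# ij = [[0, 1], [0, 2], [1, 2]].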
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
        Input X and observations y are centered and scaled with respect to
        the means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
        It consists of iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
    random_state : integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
    .. [NLNS2002] `S.N. Lophaven, H.B. Nielsen and J. Sondergaard.
        DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
        y : array_like, shape (n_eval, ) or (n_eval, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
                for k in range(int(np.ceil(n_eval / float(batch_size)))):
                    batch_from = k * batch_size
                    batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
                for k in range(int(np.ceil(n_eval / float(batch_size)))):
                    batch_from = k * batch_size
                    batch_to = min((k + 1) * batch_size, n_eval)
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
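    # Usage sketch (illustrative, mirroring the class docstring example;
    # X_new stands for any (n_eval, n_features) query array):
    #     gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1.)
    #     gp.fit(X, y)
    #     y_pred, mse = gp.predict(X_new, eval_MSE=True)
    # With eval_MSE=True the BLUP is returned together with its estimated
    # mean squared error at the query points.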
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
            The value of the reduced likelihood function associated with the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
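        # Note: following the DACE formulation referenced in the class
        # docstring, maximizing -sigma2 * det(R)**(1/n_samples) is
        # equivalent (up to a monotonic transformation) to maximizing the
        # likelihood concentrated with respect to beta and sigma2, which is
        # why this single scalar is used as the objective for theta.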
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
            The BLUP parameters associated with the optimal theta.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
imaculate/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that they form a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
ankanch/tieba-zhuaqu | user-application/KCrawlerControal/Debug/plugins/wordstimeline/lib/graphicsData.py | 10 | 2672 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import numpy
font_set = FontProperties(fname=r"c:\\windows\\fonts\\simsun.ttc", size=15)
# Basic bar chart
def barGraphics(xLabel,yLabel,xValueList,yValueList,graphicTitle='图例',xWidth=0.5):
lbwidth = []
x = 1
for item in xValueList:
lbwidth.append(x)
x += xWidth
plt.title(graphicTitle,fontproperties=font_set)
plt.xlabel(xLabel,fontproperties=font_set)
plt.ylabel(yLabel,fontproperties=font_set)
plt.xticks(lbwidth,xValueList,fontproperties=font_set)
rect = plt.bar(left = lbwidth,height = yValueList,width = xWidth,align="center",yerr=0.000001)
autolabel(rect)
plt.show()
# Line plot: thick blue line
def linePlotGraphics(xLabel,yLabel,xValueList,yValueList,graphicTitle='图例'):
with plt.style.context('fivethirtyeight'):
plt.title(graphicTitle,fontproperties=font_set,fontsize=20)
plt.xlabel(xLabel,fontproperties=font_set)
plt.ylabel(yLabel,fontproperties=font_set)
plt.xticks(numpy.arange(len(xValueList)),xValueList,rotation=45,fontproperties=font_set)
plt.plot(yValueList)
yValueList.sort()
        # Set the y-axis range and the distance from the bottom of the plot to the x-axis
print("len(yValueList)=",len(yValueList))
plt.ylim(-1.0, yValueList[len(yValueList)-1]+1)
plt.subplots_adjust(bottom=0.15,left=0.05,right=0.98,top=0.92)
        # The code below sets up the grid lines
ax = plt.gca()
        ax.get_xaxis().tick_bottom()  # only show the ticks of the bottom x-axis
ax.get_yaxis().tick_left()
ax.grid(b=False,axis='x')
axis = ax.xaxis
for line in axis.get_ticklines():
line.set_color("gray")
line.set_markersize(6)
line.set_markeredgewidth(1)
        # Show the line plot
plt.show()
#plt.savefig('percent-bachelors-degrees-women-usa.png', bbox_inches='tight')
# Scatter plot: blue dots
def scatterPlotsGraphics(xLabel,yLabel,xValueList,yValueList,graphicTitle='图例'):
with plt.style.context('fivethirtyeight'):
plt.plot(xValueList, yValueList,'o')
plt.show()
# Display the numeric labels at the top of the bars
def autolabel(rects):
for rect in rects:
height = rect.get_height()
plt.text(rect.get_x()+rect.get_width()/2., 1.03*height, '%s' % int(height))
#barGraphics('等级','数量',['A','B','C','D','E','F'],[29,30,40,47,38,23],'测试图例')
#linePlotGraphics("xLabel","yLabel",[1,2,3,4,5,6,7,8,9,10],[1.1,1.9,2.6,3.6,9.8,14,24,40,80,150],graphicTitle='图例')
#scatterPlotsGraphics("xLabel","yLabel",[1,2,3,4,5,6,7,8,9,10],[1,11.9,2,6.3,6,9.8,14,4,8,5],graphicTitle='图例')
| gpl-3.0 |
tillahoffmann/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 29 | 5666 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using convolutional networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is an alternative to the Lua code available here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_cnn_model(features, labels, mode):
"""Character level convolutional neural network model to predict classes."""
features_onehot = tf.one_hot(features[CHARS_FEATURE], 256)
input_layer = tf.reshape(
features_onehot, [-1, MAX_DOCUMENT_LENGTH, 256, 1])
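  # Shape note (added for clarity): each document is a sequence of
  # MAX_DOCUMENT_LENGTH byte ids, so features_onehot has shape
  # [batch, 100, 256] and input_layer has shape [batch, 100, 256, 1],
  # i.e. a single-channel "image" of one-hot characters that the 2-D
  # convolutions below slide over.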
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
input_layer,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
        # Add a ReLU for non-linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
x_train = x_train.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])
x_test = x_test.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])
# Build model
classifier = tf.estimator.Estimator(model_fn=char_cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
vigilv/scikit-learn | examples/exercises/plot_cv_digits.py | 232 | 1206 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
zihua/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 142 | 7183 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets words used more
often in those subsets documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
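# Worked example (illustrative):
#     number_aware_tokenizer("released in 1984 on 35mm film")
#     -> ['released', 'in', '#NUMBER', 'on', '#NUMBER', 'film']
# Tokens shorter than two word characters are dropped by the pattern, and
# any token starting with a digit or underscore collapses to '#NUMBER'.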
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
# much faster in scipy <= 0.16
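    # 'weight' is the total tf-idf mass inside the bicluster, while 'cut'
    # sums the entries sharing its rows or its columns but lying outside it;
    # the returned cut / weight ratio is a normalized-cut style score, so
    # lower values indicate better separated biclusters.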
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
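# Worked example (illustrative):
#     d = defaultdict(int); d['gun'] = 5; d['guns'] = 3
#     most_common(d)  ->  [('gun', 5), ('guns', 3)]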
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
hbp-unibi/SNABSuite | plot/2dim_plot.py | 1 | 5552 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# SNABSuite -- Spiking Neural Architecture Benchmark Suite
# Copyright (C) 2017 Andreas Stöckel, Christoph Jenzen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
"""
Plots data for two dimensional sweeps
"""
from __future__ import division
from builtins import range
from past.utils import old_div
import argparse
parser = argparse.ArgumentParser(description='Plot two-dimensional images')
# Optional arguments
parser.add_argument("--zmin", type=float, help="minimal z-value")
parser.add_argument("--zmax", type=float, help="maximal z-value")
parser.add_argument(
"-nl", type=int, help="Number of levels/ticks in z", default=11)
parser.add_argument("-q", help="qualitative Colormap", action="store_true")
parser.add_argument("-c", help="draw contour lines", action="store_true")
# Required Parameters
parser.add_argument("-z", type=int, required=True, help="Column of z-values")
parser.add_argument("files", metavar="Files", nargs='+', help="files to plot")
args = parser.parse_args()
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colorbar
import sys
import os
from dim_labels import *
def cm2inch(value):
return value / 2.54
def round_to_divisable(value, divisable):
if value == 0:
return 0
temp = np.abs(value)
a = 0
while temp < divisable:
temp *= 10.0
a += 1
if temp % divisable == 0:
return value
res = old_div((temp - (temp % divisable) + divisable), (10.0**a))
if value < 0:
return -res
return res
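# Worked example (illustrative): round_to_divisable(0.037, 10) scales the
# value up to 37.0, rounds up to the next multiple of 10 (i.e. 40.0) and
# scales back, returning 0.04. It is used below so that the auto-scaled
# zmax splits into args.nl - 1 even colorbar/contour intervals.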
def plot_measure2d(xs, ys, zs, xlabel, ylabel, zlabel="", zmin=None,
zmax=None, qualitative=False, contour=True, title=None):
fig = plt.figure(figsize=(cm2inch(5.5), cm2inch(5.5)))
ax1 = fig.add_axes([0.0, 0.25, 1.0, 0.85])
if title is not None:
plt.title(title)
ax2 = fig.add_axes([0.0, 0.0, 1.0, 0.05])
_, steps_x = np.unique(xs, return_counts=True)
_, steps_y = np.unique(ys, return_counts=True)
steps_x = np.max(steps_x)
steps_y = np.max(steps_y)
xs = xs.reshape((steps_y, steps_x))
ys = ys.reshape((steps_y, steps_x))
zs = zs.reshape((steps_y, steps_x))
zs = zs.transpose()
# Auto-scale
idcs = np.isfinite(zs)
if np.sum(idcs) == 0:
return
if zmin is None:
zmin = np.min(zs[idcs])
if 0 < zmin < 1:
zmin = 0
else:
zmin = int(zmin)
if zmax is None:
zmax = round_to_divisable(np.max(zs[idcs]), args.nl - 1)
if zmin > 0:
zmax = zmax + zmin
if 0 < zmax < 1:
zmax = 1
# Select the colormap
if qualitative:
cmap = plt.cm.rainbow
else:
#cmap = plt.cm.Purples
# if zmin < 0.0:
cmap = plt.cm.PuOr
cmap.set_bad('black', 1.)
extent = (np.min(xs), np.max(xs), np.min(ys), np.max(ys))
ax1.imshow(zs, aspect='auto', origin='lower', extent=extent, cmap=cmap,
vmin=zmin, vmax=zmax, interpolation="none")
levels = np.linspace(zmin, zmax, args.nl)
zs = zs.transpose()
if contour:
CS2 = ax1.contour(xs, ys, zs, levels, linewidths=0.25,
colors='k', vmin=zmin, vmax=zmax)
ax1.grid(color='black', linestyle=':', linewidth=0.25)
ax1.set_xlabel(xlabel)
ax1.set_ylabel(ylabel)
cbar = matplotlib.colorbar.ColorbarBase(ax2, cmap=cmap,
orientation='horizontal', ticks=levels,
norm=matplotlib.colors.Normalize(zmin, zmax))
cbar.set_label(zlabel)
return fig
if not os.path.exists("images"):
os.mkdir("images")
for target_file in args.files:
simulator = target_file.split('_')[-1].split('.csv')[0]
experiment = target_file.split('/')[-1].split(simulator)[0]
#import data
results = np.genfromtxt(target_file, delimiter=',', names=True)
keys = results.dtype.names
data = np.zeros((results.shape[0], len(keys)))
for i in range(0, len(results)):
data[i] = np.array(list(results[i]))
fig = plot_measure2d(data[:, 0], data[:, 1], data[:, args.z],
xlabel=get_label(keys[0]), ylabel=get_label(keys[1]),
zlabel=get_label(keys[args.z]), zmin=args.zmin,
zmax=args.zmax, qualitative=args.q, contour=args.c,
title=SIMULATOR_LABELS[simulator])
if target_file.split('/')[-2]:
if not os.path.exists("images/" + target_file.split('/')[-2]):
os.mkdir("images/" + target_file.split('/')[-2])
fig.savefig("images/" + target_file.split('/')[-2] + "/" +
experiment + simulator + ".pdf", format='pdf',
bbox_inches='tight')
else:
fig.savefig("images/" + experiment + simulator + ".pdf", format='pdf',
bbox_inches='tight')
| gpl-3.0 |
paultcochrane/bokeh | bokeh/compat/mplexporter/exporter.py | 32 | 12403 | """
Matplotlib Exporter
===================
This submodule contains tools for crawling a matplotlib figure and exporting
relevant pieces to a renderer.
"""
import warnings
import io
from . import utils
import matplotlib
from matplotlib import transforms
from matplotlib.backends.backend_agg import FigureCanvasAgg
class Exporter(object):
"""Matplotlib Exporter
Parameters
----------
renderer : Renderer object
The renderer object called by the exporter to create a figure
visualization. See mplexporter.Renderer for information on the
methods which should be defined within the renderer.
close_mpl : bool
If True (default), close the matplotlib figure as it is rendered. This
is useful for when the exporter is used within the notebook, or with
an interactive matplotlib backend.
"""
def __init__(self, renderer, close_mpl=True):
self.close_mpl = close_mpl
self.renderer = renderer
def run(self, fig):
"""
Run the exporter on the given figure
        Parameters
        ----------
fig : matplotlib.Figure instance
The figure to export
"""
# Calling savefig executes the draw() command, putting elements
# in the correct place.
if fig.canvas is None:
fig.canvas = FigureCanvasAgg(fig)
fig.savefig(io.BytesIO(), format='png', dpi=fig.dpi)
if self.close_mpl:
import matplotlib.pyplot as plt
plt.close(fig)
self.crawl_fig(fig)
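    # Usage sketch (illustrative; `my_renderer` stands for any concrete
    # Renderer subclass implementing the draw_* callbacks):
    #     exporter = Exporter(my_renderer)
    #     exporter.run(fig)
    # run() draws the figure once (by calling savefig into an in-memory
    # buffer) and then crawls its axes, lines, texts, patches, collections,
    # images and legends.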
@staticmethod
def process_transform(transform, ax=None, data=None, return_trans=False,
force_trans=None):
"""Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
"""
if isinstance(transform, transforms.BlendedGenericTransform):
warnings.warn("Blended transforms not yet supported. "
"Zoom behavior may not work as expected.")
if force_trans is not None:
if data is not None:
data = (transform - force_trans).transform(data)
transform = force_trans
code = "display"
if ax is not None:
for (c, trans) in [("data", ax.transData),
("axes", ax.transAxes),
("figure", ax.figure.transFigure),
("display", transforms.IdentityTransform())]:
if transform.contains_branch(trans):
code, transform = (c, transform - trans)
break
if data is not None:
if return_trans:
return code, transform.transform(data), transform
else:
return code, transform.transform(data)
else:
if return_trans:
return code, transform
else:
return code
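    # Illustrative behaviour (assuming a standard Axes `ax`): for a line
    # plotted in data coordinates, process_transform(line.get_transform(),
    # ax, xy) typically returns ("data", xy) unchanged, whereas an artist
    # using ax.transAxes yields the code "axes"; the code tells the
    # renderer which coordinate system the returned points live in.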
def crawl_fig(self, fig):
"""Crawl the figure and process all axes"""
with self.renderer.draw_figure(fig=fig,
props=utils.get_figure_properties(fig)):
for ax in fig.axes:
self.crawl_ax(ax)
def crawl_ax(self, ax):
"""Crawl the axes and process all elements within"""
with self.renderer.draw_axes(ax=ax,
props=utils.get_axes_properties(ax)):
for line in ax.lines:
self.draw_line(ax, line)
for text in ax.texts:
self.draw_text(ax, text)
for (text, ttp) in zip([ax.xaxis.label, ax.yaxis.label, ax.title],
["xlabel", "ylabel", "title"]):
if(hasattr(text, 'get_text') and text.get_text()):
self.draw_text(ax, text, force_trans=ax.transAxes,
text_type=ttp)
for artist in ax.artists:
# TODO: process other artists
if isinstance(artist, matplotlib.text.Text):
self.draw_text(ax, artist)
for patch in ax.patches:
self.draw_patch(ax, patch)
for collection in ax.collections:
self.draw_collection(ax, collection)
for image in ax.images:
self.draw_image(ax, image)
legend = ax.get_legend()
if legend is not None:
props = utils.get_legend_properties(ax, legend)
with self.renderer.draw_legend(legend=legend, props=props):
if props['visible']:
self.crawl_legend(ax, legend)
def crawl_legend(self, ax, legend):
"""
Recursively look through objects in legend children
"""
legendElements = list(utils.iter_all_children(legend._legend_box,
skipContainers=True))
legendElements.append(legend.legendPatch)
for child in legendElements:
# force a large zorder so it appears on top
child.set_zorder(1E6 + child.get_zorder())
try:
# What kind of object...
if isinstance(child, matplotlib.patches.Patch):
self.draw_patch(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.text.Text):
if not (child is legend.get_children()[-1]
and child.get_text() == 'None'):
self.draw_text(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.lines.Line2D):
self.draw_line(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.collections.Collection):
self.draw_collection(ax, child,
force_pathtrans=ax.transAxes)
else:
warnings.warn("Legend element %s not impemented" % child)
except NotImplementedError:
warnings.warn("Legend element %s not impemented" % child)
def draw_line(self, ax, line, force_trans=None):
"""Process a matplotlib line and call renderer.draw_line"""
coordinates, data = self.process_transform(line.get_transform(),
ax, line.get_xydata(),
force_trans=force_trans)
linestyle = utils.get_line_style(line)
if linestyle['dasharray'] is None:
linestyle = None
markerstyle = utils.get_marker_style(line)
if (markerstyle['marker'] in ['None', 'none', None]
or markerstyle['markerpath'][0].size == 0):
markerstyle = None
label = line.get_label()
if markerstyle or linestyle:
self.renderer.draw_marked_line(data=data, coordinates=coordinates,
linestyle=linestyle,
markerstyle=markerstyle,
label=label,
mplobj=line)
def draw_text(self, ax, text, force_trans=None, text_type=None):
"""Process a matplotlib text object and call renderer.draw_text"""
content = text.get_text()
if content:
transform = text.get_transform()
position = text.get_position()
coords, position = self.process_transform(transform, ax,
position,
force_trans=force_trans)
style = utils.get_text_style(text)
self.renderer.draw_text(text=content, position=position,
coordinates=coords,
text_type=text_type,
style=style, mplobj=text)
def draw_patch(self, ax, patch, force_trans=None):
"""Process a matplotlib patch object and call renderer.draw_path"""
vertices, pathcodes = utils.SVG_path(patch.get_path())
transform = patch.get_transform()
coordinates, vertices = self.process_transform(transform,
ax, vertices,
force_trans=force_trans)
linestyle = utils.get_path_style(patch, fill=patch.get_fill())
self.renderer.draw_path(data=vertices,
coordinates=coordinates,
pathcodes=pathcodes,
style=linestyle,
mplobj=patch)
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
path_coords = self.process_transform(
transform, ax, force_trans=force_pathtrans)
processed_paths = [utils.SVG_path(path) for path in paths]
processed_paths = [(self.process_transform(
transform, ax, path[0], force_trans=force_pathtrans)[1], path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection)
def draw_image(self, ax, image):
"""Process a matplotlib image object and call renderer.draw_image"""
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image)
| bsd-3-clause |
xiaoweih/DLV | networks/twoDcurve.py | 1 | 5180 | #!/usr/bin/env python
"""
For Xiaowei to train MLP for regression
"""
import sys
import time
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
# keras
from keras.models import Model, Sequential
from keras.layers import Input, Dense
import keras.optimizers
# visualisation
from keras.utils.visualize_util import plot
#
#import analyze as analyzeNN
#import display
import basics
from math import sqrt
TWO_PI = 2 * np.pi
studyRangeLow = [0,0]
studyRangeHigh = [TWO_PI,TWO_PI]
def target_fun(x):
# function to regress
#p = 3*np.sin(10*x[0]) + 0.2*(x[0]**2) + 1
p = 1 + sqrt(10*(abs(x[0]-3)))
return p
def LABELS(index):
labels = ['0', '1']
return labels[index]
def save(layer,image,filename):
"""
    Plot the target curve, mark the given 2D point colour-coded by layer,
    and save the figure to filename.
"""
plt.plot(np.linspace(studyRangeLow[0], studyRangeHigh[0]), [target_fun([x]) for x in np.linspace(studyRangeLow[0], studyRangeHigh[0])], 'r')
if layer == -1:
color = 'g.'
elif layer == 0:
color = 'r.'
elif layer == 1:
color = 'b.'
elif layer == 2:
color = 'y.'
elif layer == 3:
color = 'c.'
else: color = 'b.'
plt.plot([image[0]], [image[1]], color)
plt.savefig(filename)
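# --- Hedged sketch (assumption; not the project's NN.load_data) ---
# The commented-out regression() below expects binary labels.  One plausible
# way to build such a dataset is to sample points in the study range and
# label them by whether they fall above the target curve.  The helper name
# make_curve_dataset is hypothetical.
def make_curve_dataset(n_samples=1000, rng=None):
    if rng is None:
        rng = np.random.RandomState(0)
    xs = rng.uniform(studyRangeLow[0], studyRangeHigh[0], size=n_samples)
    ys = rng.uniform(studyRangeLow[1], studyRangeHigh[1], size=n_samples)
    labels = (ys > np.array([target_fun([x]) for x in xs])).astype(int)
    return np.column_stack([xs, ys]), labels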
"""
def regression():
if len(sys.argv) > 1:
if sys.argv[1] == '0':
fromFile = False
else: fromFile = True
else: fromFile = False
N_samples = 5000
N_tests = 1000
# load data
x_train, y_train, x_test, y_test = NN.load_data(N_samples,N_tests)
if fromFile == False:
# define and construct model
print "Building network model ......"
model = NN.build_model()
plot(model, to_file='model.png')
# visualisation
# configure learning process
sgd = keras.optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss={'output': 'mse'})
model.summary()
start_time = time.time()
model.fit({'data': x_train}, {'output': y_train}, nb_epoch=3000, validation_split=0.1, verbose=0)
print("Fitting time: --- %s seconds ---" % (time.time() - start_time))
print("Training finished!")
# save model
json_string = model.to_json()
open('MLP.json', 'w').write(json_string)
model.save_weights('MLP.h5', overwrite=True)
sio.savemat('MLP.mat', {'weights': model.get_weights()})
print("Model saved!")
else:
print("Start loading model ... ")
model = basics.read_model_from_file('MLP.mat','MLP.json')
model.summary()
#display.print_structure("MLP.h5")
print("Start analyzing model ... ")
start_time = time.time()
model1 = analyzeNN.analyze(model, x_train, y_train, [studyRangeLow[0],studyRangeHigh[0]], [studyRangeLow[1],studyRangeHigh[1]], plt)
print("Analyzing time: --- %s seconds ---" % (time.time() - start_time))
model1.save_weights('MLP1.h5', overwrite=True)
#analyzeNN.print_structure("MLP1.h5")
# prediction after training
y_predicted = model.predict(x_test)
y_predicted1 = model1.predict(x_test)
# display results
plt.plot(np.linspace(studyRangeLow[0], studyRangeHigh[0]), [basics.target_fun([x]) for x in np.linspace(studyRangeLow[0], studyRangeHigh[0])], 'r')
train_set = zip(x_train,y_train)
train_set_high = [ x for (x,y) in train_set if y[0] == 1]
train_set_low = [ x for (x,y) in train_set if y[0] == 0]
(x_train_high, y_train_high) = zip(*train_set_high)
(x_train_low, y_train_low) = zip(*train_set_low)
plt.plot(x_train_high, y_train_high, 'g.')
plt.plot(x_train_low, y_train_low, 'y.')
threshold = 0.5
dangerThreshold = 0.9
plt.savefig("pic/result.png")
plt.show()
"""
"""
test_set = zip(x_test,y_test,y_predicted)
test_set_high = [ x for (x,y,z) in test_set if y[0] == True and z[0] >= threshold ]
test_set_low = [ x for (x,y,z) in test_set if y[0] == False and z[0] <= 1 - threshold ]
test_set_wrong = [ x for (x,y,z) in test_set if ~(y[0] == True and z[0] >= threshold) and ~(y[0] == False and z[0] <= 1 - threshold) ]
test_set_wrong2 = [ x for (x,y,z) in test_set if (y[0] == True and z[0] <= 1- dangerThreshold) or (y[0] == False and z[0] >= dangerThreshold) ]
print str(len(test_set_wrong)) + " testing samples are classified wrong, among all " + str(len(test_set)) +" samples "
print str(len(test_set_wrong2)) + " testing samples are classified wrong in a definite way, among all " + str(len(test_set)) +" samples "
if len(test_set_high) > 0:
(x_test_high, y_test_high) = zip(*test_set_high)
plt.plot(x_test_high, y_test_high, 'c.')
if len(test_set_low) > 0:
(x_test_low, y_test_low) = zip(*test_set_low)
plt.plot(x_test_low, y_test_low, 'b.')
(x_test_wrong, y_test_wrong) = zip(*test_set_wrong)
plt.plot(x_test_wrong, y_test_wrong, 'r.')
"""
#plt.legend(['Target line', 'Training samples high', 'Training samples low', 'Testing samples high', 'Testing samples low', 'Testing samples wrong' ])
| gpl-3.0 |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/tseries/tests/test_converter.py | 13 | 5611 | from datetime import datetime, time, timedelta, date
import sys
import os
import nose
import numpy as np
from numpy.testing import assert_almost_equal as np_assert_almost_equal
from pandas import Timestamp, Period
from pandas.compat import u
import pandas.util.testing as tm
from pandas.tseries.offsets import Second, Milli, Micro
try:
import pandas.tseries.converter as converter
except ImportError:
raise nose.SkipTest("no pandas.tseries.converter, skipping")
def test_timetonum_accepts_unicode():
assert(converter.time2num("00:01") == converter.time2num(u("00:01")))
class TestDateTimeConverter(tm.TestCase):
def setUp(self):
self.dtc = converter.DatetimeConverter()
self.tc = converter.TimeFormatter(None)
def test_convert_accepts_unicode(self):
r1 = self.dtc.convert("12:22", None, None)
r2 = self.dtc.convert(u("12:22"), None, None)
assert(r1 == r2), "DatetimeConverter.convert should accept unicode"
def test_conversion(self):
rs = self.dtc.convert(['2012-1-1'], None, None)[0]
xp = datetime(2012, 1, 1).toordinal()
self.assertEqual(rs, xp)
rs = self.dtc.convert('2012-1-1', None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(date(2012, 1, 1), None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(datetime(2012, 1, 1).toordinal(), None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert('2012-1-1', None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(Timestamp('2012-1-1'), None, None)
self.assertEqual(rs, xp)
# also testing datetime64 dtype (GH8614)
rs = self.dtc.convert(np.datetime64('2012-01-01'), None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(np.datetime64('2012-01-01 00:00:00+00:00'), None, None)
self.assertEqual(rs, xp)
rs = self.dtc.convert(np.array([np.datetime64('2012-01-01 00:00:00+00:00'),
np.datetime64('2012-01-02 00:00:00+00:00')]), None, None)
self.assertEqual(rs[0], xp)
def test_conversion_float(self):
decimals = 9
rs = self.dtc.convert(Timestamp('2012-1-1 01:02:03', tz='UTC'), None, None)
xp = converter.dates.date2num(Timestamp('2012-1-1 01:02:03', tz='UTC'))
np_assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(Timestamp('2012-1-1 09:02:03', tz='Asia/Hong_Kong'), None, None)
np_assert_almost_equal(rs, xp, decimals)
rs = self.dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None)
np_assert_almost_equal(rs, xp, decimals)
def test_time_formatter(self):
self.tc(90000)
def test_dateindex_conversion(self):
decimals = 9
for freq in ('B', 'L', 'S'):
dateindex = tm.makeDateIndex(k = 10, freq = freq)
rs = self.dtc.convert(dateindex, None, None)
xp = converter.dates.date2num(dateindex._mpl_repr())
np_assert_almost_equal(rs, xp, decimals)
def test_resolution(self):
def _assert_less(ts1, ts2):
val1 = self.dtc.convert(ts1, None, None)
val2 = self.dtc.convert(ts2, None, None)
if not val1 < val2:
raise AssertionError('{0} is not less than {1}.'.format(val1, val2))
# Matplotlib's time representation using floats cannot distinguish intervals smaller
        # than ~10 microseconds in the common range of years.
ts = Timestamp('2012-1-1')
_assert_less(ts, ts + Second())
_assert_less(ts, ts + Milli())
_assert_less(ts, ts + Micro(50))
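# --- Hedged back-of-envelope check (illustration only) ---
# matplotlib stores datetimes as float64 "days since an epoch".  Around 2012
# that ordinal is roughly 7.3e5 days, so the gap between adjacent
# representable floats is about np.spacing(7.3e5) days -- on the order of ten
# microseconds -- which is why Micro(50) above is still distinguishable.
def _demo_float_resolution():
    days = 734503.0                      # ~ datetime(2012, 1, 1).toordinal()
    spacing_us = np.spacing(days) * 24 * 3600 * 1e6
    print("float resolution near 2012: ~%.1f microseconds" % spacing_us)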
class TestPeriodConverter(tm.TestCase):
def setUp(self):
self.pc = converter.PeriodConverter()
class Axis(object):
pass
self.axis = Axis()
self.axis.freq = 'D'
def test_convert_accepts_unicode(self):
r1 = self.pc.convert("2012-1-1", None, self.axis)
r2 = self.pc.convert(u("2012-1-1"), None, self.axis)
self.assert_equal(r1, r2, "PeriodConverter.convert should accept unicode")
def test_conversion(self):
rs = self.pc.convert(['2012-1-1'], None, self.axis)[0]
xp = Period('2012-1-1').ordinal
self.assertEqual(rs, xp)
rs = self.pc.convert('2012-1-1', None, self.axis)
self.assertEqual(rs, xp)
rs = self.pc.convert([date(2012, 1, 1)], None, self.axis)[0]
self.assertEqual(rs, xp)
rs = self.pc.convert(date(2012, 1, 1), None, self.axis)
self.assertEqual(rs, xp)
rs = self.pc.convert([Timestamp('2012-1-1')], None, self.axis)[0]
self.assertEqual(rs, xp)
rs = self.pc.convert(Timestamp('2012-1-1'), None, self.axis)
self.assertEqual(rs, xp)
# FIXME
# rs = self.pc.convert(np.datetime64('2012-01-01'), None, self.axis)
# self.assertEqual(rs, xp)
#
# rs = self.pc.convert(np.datetime64('2012-01-01 00:00:00+00:00'), None, self.axis)
# self.assertEqual(rs, xp)
#
# rs = self.pc.convert(np.array([np.datetime64('2012-01-01 00:00:00+00:00'),
# np.datetime64('2012-01-02 00:00:00+00:00')]), None, self.axis)
# self.assertEqual(rs[0], xp)
def test_integer_passthrough(self):
# GH9012
rs = self.pc.convert([0, 1], None, self.axis)
xp = [0, 1]
self.assertEqual(rs, xp)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
ycaihua/scikit-learn | sklearn/ensemble/gradient_boosting.py | 2 | 60872 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length
from ..utils.extmath import logsumexp
from ..utils.fixes import expit
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
"""An estimator predicting the alpha-quantile of the training targets."""
def __init__(self, alpha=0.9):
if not 0 < alpha < 1.0:
raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
self.alpha = alpha
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.quantile = stats.scoreatpercentile(y, self.alpha * 100.0)
else:
self.quantile = _weighted_percentile(y, sample_weight, self.alpha * 100.0)
def predict(self, X):
check_is_fitted(self, 'quantile')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.quantile)
return y
class MeanEstimator(BaseEstimator):
"""An estimator predicting the mean of the training targets."""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
self.mean = np.mean(y)
else:
self.mean = np.average(y, weights=sample_weight)
def predict(self, X):
check_is_fitted(self, 'mean')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.mean)
return y
class LogOddsEstimator(BaseEstimator):
"""An estimator predicting the log odds ratio."""
scale = 1.0
def fit(self, X, y, sample_weight=None):
# pre-cond: pos, neg are encoded as 1, 0
if sample_weight is None:
pos = np.sum(y)
neg = y.shape[0] - pos
else:
pos = np.sum(sample_weight * y)
neg = np.sum(sample_weight * (1 - y))
if neg == 0 or pos == 0:
raise ValueError('y contains non binary labels.')
self.prior = self.scale * np.log(pos / neg)
def predict(self, X):
check_is_fitted(self, 'prior')
y = np.empty((X.shape[0], 1), dtype=np.float64)
y.fill(self.prior)
return y
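# --- Hedged worked example (illustration only) ---
# For a binary target with 75 positives and 25 negatives, the unscaled
# LogOddsEstimator prior is log(75 / 25) = log(3) ~= 1.0986, i.e. the raw
# score whose sigmoid recovers the positive-class frequency 0.75.
def _demo_log_odds_prior():
    y = np.array([1] * 75 + [0] * 25)
    est = LogOddsEstimator()
    est.fit(None, y)              # X is unused by this estimator's fit
    print(est.prior)              # ~1.0986
    print(expit(est.prior))       # ~0.75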
class ScaledLogOddsEstimator(LogOddsEstimator):
"""Log odds ratio scaled by 0.5 -- for exponential loss. """
scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
"""An estimator predicting the probability of each
class in the training data.
"""
def fit(self, X, y, sample_weight=None):
if sample_weight is None:
sample_weight = np.ones_like(y, dtype=np.float)
class_counts = np.bincount(y, weights=sample_weight)
self.priors = class_counts / class_counts.sum()
def predict(self, X):
check_is_fitted(self, 'priors')
y = np.empty((X.shape[0], self.priors.shape[0]), dtype=np.float64)
y[:] = self.priors
return y
class ZeroEstimator(BaseEstimator):
"""An estimator that simply predicts zero. """
def fit(self, X, y, sample_weight=None):
if np.issubdtype(y.dtype, int):
# classification
self.n_classes = np.unique(y).shape[0]
if self.n_classes == 2:
self.n_classes = 1
else:
# regression
self.n_classes = 1
def predict(self, X):
check_is_fitted(self, 'n_classes')
y = np.empty((X.shape[0], self.n_classes), dtype=np.float64)
y.fill(0.0)
return y
class LossFunction(six.with_metaclass(ABCMeta, object)):
"""Abstract base class for various loss functions.
Attributes
----------
K : int
The number of regression trees to be induced;
1 for regression and binary classification;
``n_classes`` for multi-class classification.
"""
is_multi_class = False
def __init__(self, n_classes):
self.K = n_classes
def init_estimator(self):
"""Default ``init`` estimator for loss function. """
raise NotImplementedError()
@abstractmethod
def __call__(self, y, pred, sample_weight=None):
"""Compute the loss of prediction ``pred`` and ``y``. """
@abstractmethod
def negative_gradient(self, y, y_pred, **kargs):
"""Compute the negative gradient.
Parameters
        ----------
y : np.ndarray, shape=(n,)
The target labels.
y_pred : np.ndarray, shape=(n,):
The predictions.
"""
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Update the terminal regions (=leaves) of the given tree and
updates the current predictions of the model. Traverses tree
and invokes template method `_update_terminal_region`.
Parameters
----------
tree : tree.Tree
The tree object.
X : ndarray, shape=(n, m)
The data array.
y : ndarray, shape=(n,)
The target labels.
residual : ndarray, shape=(n,)
The residuals (usually the negative gradient).
y_pred : ndarray, shape=(n,)
The predictions.
sample_weight : ndarray, shape=(n,)
The weight of each sample.
sample_mask : ndarray, shape=(n,)
The sample mask to be used.
learning_rate : float, default=0.1
learning rate shrinks the contribution of each tree by
``learning_rate``.
k : int, default 0
The index of the estimator being updated.
"""
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
# update each leaf (= perform line search)
for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
self._update_terminal_region(tree, masked_terminal_regions,
leaf, X, y, residual,
y_pred[:, k], sample_weight)
# update predictions (both in-bag and out-of-bag)
y_pred[:, k] += (learning_rate
* tree.value[:, 0, 0].take(terminal_regions, axis=0))
@abstractmethod
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for regression loss functions. """
def __init__(self, n_classes):
if n_classes != 1:
raise ValueError("``n_classes`` must be 1 for regression but "
"was %r" % n_classes)
super(RegressionLossFunction, self).__init__(n_classes)
class LeastSquaresError(RegressionLossFunction):
"""Loss function for least squares (LS) estimation.
    Terminal regions do not need to be updated for least squares. """
def init_estimator(self):
return MeanEstimator()
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.mean((y - pred.ravel()) ** 2.0)
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * ((y - pred.ravel()) ** 2.0)))
def negative_gradient(self, y, pred, **kargs):
return y - pred.ravel()
def update_terminal_regions(self, tree, X, y, residual, y_pred,
sample_weight, sample_mask,
learning_rate=1.0, k=0):
"""Least squares does not need to update terminal regions.
But it has to update the predictions.
"""
# update predictions
y_pred[:, k] += learning_rate * tree.predict(X).ravel()
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
pass
class LeastAbsoluteError(RegressionLossFunction):
"""Loss function for least absolute deviation (LAD) regression. """
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
if sample_weight is None:
return np.abs(y - pred.ravel()).mean()
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.abs(y - pred.ravel())))
def negative_gradient(self, y, pred, **kargs):
"""1.0 if y - pred > 0.0 else -1.0"""
pred = pred.ravel()
return 2.0 * (y - pred > 0.0) - 1.0
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""LAD updates terminal regions to median estimates. """
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
diff = y.take(terminal_region, axis=0) - pred.take(terminal_region, axis=0)
tree.value[leaf, 0, 0] = _weighted_percentile(diff, sample_weight, percentile=50)
class HuberLossFunction(RegressionLossFunction):
"""Huber loss function for robust regression.
M-Regression proposed in Friedman 2001.
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
"""
def __init__(self, n_classes, alpha=0.9):
super(HuberLossFunction, self).__init__(n_classes)
self.alpha = alpha
self.gamma = None
def init_estimator(self):
return QuantileEstimator(alpha=0.5)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
gamma = self.gamma
if gamma is None:
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
if sample_weight is None:
sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / y.shape[0]
else:
sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
(np.abs(diff[~gamma_mask]) - gamma / 2.0))
loss = (sq_loss + lin_loss) / sample_weight.sum()
return loss
def negative_gradient(self, y, pred, sample_weight=None, **kargs):
pred = pred.ravel()
diff = y - pred
if sample_weight is None:
gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
else:
gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)
gamma_mask = np.abs(diff) <= gamma
residual = np.zeros((y.shape[0],), dtype=np.float64)
residual[gamma_mask] = diff[gamma_mask]
residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
self.gamma = gamma
return residual
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
sample_weight = sample_weight.take(terminal_region, axis=0)
gamma = self.gamma
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
median = _weighted_percentile(diff, sample_weight, percentile=50)
diff_minus_median = diff - median
tree.value[leaf, 0] = median + np.mean(
np.sign(diff_minus_median) *
np.minimum(np.abs(diff_minus_median), gamma))
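# --- Hedged numeric sketch (illustration only) ---
# Compared with squared error, the Huber loss penalises residuals beyond the
# alpha-quantile gamma only linearly, so a single large outlier inflates the
# loss noticeably less (~8.4 vs 20.0 in this toy case).
def _demo_huber_vs_ls():
    y = np.array([0.0, 0.0, 0.0, 0.0, 10.0])   # last sample is an outlier
    pred = np.zeros_like(y)
    print("LS loss:   ", LeastSquaresError(1)(y, pred))            # 20.0
    print("Huber loss:", HuberLossFunction(1, alpha=0.9)(y, pred))  # ~8.4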
class QuantileLossFunction(RegressionLossFunction):
"""Loss function for quantile regression.
Quantile regression allows to estimate the percentiles
of the conditional distribution of the target.
"""
def __init__(self, n_classes, alpha=0.9):
super(QuantileLossFunction, self).__init__(n_classes)
assert 0 < alpha < 1.0
self.alpha = alpha
self.percentile = alpha * 100.0
def init_estimator(self):
return QuantileEstimator(self.alpha)
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
diff = y - pred
alpha = self.alpha
mask = y > pred
if sample_weight is None:
loss = (alpha * diff[mask].sum() +
(1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
else:
loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) +
(1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
sample_weight.sum())
return loss
def negative_gradient(self, y, pred, **kargs):
alpha = self.alpha
pred = pred.ravel()
mask = y > pred
return (alpha * mask) - ((1.0 - alpha) * ~mask)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
diff = (y.take(terminal_region, axis=0)
- pred.take(terminal_region, axis=0))
sample_weight = sample_weight.take(terminal_region, axis=0)
val = _weighted_percentile(diff, sample_weight, self.percentile)
tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
"""Base class for classification loss functions. """
def _score_to_proba(self, score):
"""Template method to convert scores to probabilities.
        If the loss does not support probabilities, raises AttributeError.
"""
raise TypeError('%s does not support predict_proba' % type(self).__name__)
@abstractmethod
def _score_to_decision(self, score):
"""Template method to convert scores to decisions.
Returns int arrays.
"""
class BinomialDeviance(ClassificationLossFunction):
"""Binomial deviance loss function for binary classification.
Binary classification is a special case; here, we only need to
fit one tree instead of ``n_classes`` trees.
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(BinomialDeviance, self).__init__(1)
def init_estimator(self):
return LogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
"""Compute the deviance (= 2 * negative log-likelihood). """
# logaddexp(0, v) == log(1.0 + exp(v))
pred = pred.ravel()
if sample_weight is None:
return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
else:
return (-2.0 / sample_weight.sum() *
np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))
def negative_gradient(self, y, pred, **kargs):
"""Compute the residual (= negative gradient). """
return y - expit(pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step.
our node estimate is given by:
sum(w * (y - prob)) / sum(w * prob * (1 - prob))
we take advantage that: y - prob = residual
"""
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = 1.0 / (1.0 + np.exp(-score.ravel()))
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
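# --- Hedged worked example of the Newton step above (illustration only) ---
# Because prob = y - residual, the leaf value
#     sum(w * residual) / sum(w * prob * (1 - prob))
# can be computed from the residuals alone.  With unit weights,
# y = [1, 1, 0] and current prob = [0.6, 0.7, 0.4]:
#     residual    = y - prob           = [ 0.4,  0.3, -0.4]
#     numerator   = 0.4 + 0.3 - 0.4    = 0.3
#     denominator = 0.24 + 0.21 + 0.24 = 0.69
#     leaf value  = 0.3 / 0.69        ~= 0.435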
class MultinomialDeviance(ClassificationLossFunction):
"""Multinomial deviance loss function for multi-class classification.
For multi-class classification we need to fit ``n_classes`` trees at
each stage.
"""
is_multi_class = True
def __init__(self, n_classes):
if n_classes < 3:
raise ValueError("{0:s} requires more than 2 classes.".format(
self.__class__.__name__))
super(MultinomialDeviance, self).__init__(n_classes)
def init_estimator(self):
return PriorProbabilityEstimator()
def __call__(self, y, pred, sample_weight=None):
# create one-hot label encoding
Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
for k in range(self.K):
Y[:, k] = y == k
if sample_weight is None:
return np.sum(-1 * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
else:
return np.sum(-1 * sample_weight * (Y * pred).sum(axis=1) +
logsumexp(pred, axis=1))
def negative_gradient(self, y, pred, k=0, **kwargs):
"""Compute negative gradient for the ``k``-th class. """
return y - np.nan_to_num(np.exp(pred[:, k] -
logsumexp(pred, axis=1)))
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
"""Make a single Newton-Raphson step. """
terminal_region = np.where(terminal_regions == leaf)[0]
residual = residual.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
numerator = np.sum(sample_weight * residual)
numerator *= (self.K - 1) / self.K
denominator = np.sum(sample_weight * (y - residual) *
(1.0 - y + residual))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
return np.nan_to_num(
np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))
def _score_to_decision(self, score):
proba = self._score_to_proba(score)
return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
"""Exponential loss function for binary classification.
Same loss as AdaBoost.
References
----------
Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
"""
def __init__(self, n_classes):
if n_classes != 2:
raise ValueError("{0:s} requires 2 classes.".format(
self.__class__.__name__))
# we only need to fit one tree for binary clf.
super(ExponentialLoss, self).__init__(1)
def init_estimator(self):
return ScaledLogOddsEstimator()
def __call__(self, y, pred, sample_weight=None):
pred = pred.ravel()
if sample_weight is None:
return np.mean(np.exp(-(2. * y - 1.) * pred))
else:
return (1.0 / sample_weight.sum() *
np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))
def negative_gradient(self, y, pred, **kargs):
y_ = -(2. * y - 1.)
return y_ * np.exp(y_ * pred.ravel())
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight):
terminal_region = np.where(terminal_regions == leaf)[0]
pred = pred.take(terminal_region, axis=0)
y = y.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region, axis=0)
y_ = 2. * y - 1.
numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
denominator = np.sum(sample_weight * np.exp(-y_ * pred))
if denominator == 0.0:
tree.value[leaf, 0, 0] = 0.0
else:
tree.value[leaf, 0, 0] = numerator / denominator
def _score_to_proba(self, score):
proba = np.ones((score.shape[0], 2), dtype=np.float64)
proba[:, 1] = 1.0 / (1.0 + np.exp(-2.0 * score.ravel()))
proba[:, 0] -= proba[:, 1]
return proba
def _score_to_decision(self, score):
return (score.ravel() >= 0.0).astype(np.int)
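# --- Hedged numeric sketch (illustration only) ---
# With labels encoded as {0, 1}, the margin is (2*y - 1) * pred, so for
# y = [1, 0] and raw scores pred = [2.0, -1.0]:
#     exp(-margin) = [exp(-2.0), exp(-1.0)] ~= [0.135, 0.368]
#     loss         = mean(exp(-margin))     ~= 0.25
# i.e. the usual AdaBoost exponential criterion, here on two correctly
# classified samples of different confidence.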
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
'lad': LeastAbsoluteError,
'huber': HuberLossFunction,
'quantile': QuantileLossFunction,
'deviance': None, # for both, multinomial and binomial
'exponential': ExponentialLoss,
}
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero); if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train Loss']
verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
# do oob?
if est.subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>16.4f}')
header_fields.append('Remaining Time')
verbose_fmt.append('{remaining_time:>16s}')
# print the header line
print(('%10s ' + '%16s ' *
(len(header_fields) - 1)) % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time()
self.begin_at_stage = begin_at_stage
def update(self, j, est):
"""Update reporter with new iteration. """
do_oob = est.subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time))
if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
# adjust verbose frequency (powers of 10)
self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
max_features = int(self.max_features * self.n_features)
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
                             (total_n_estimators, self.estimators_.shape[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples]
Target values (integers in classification, real numbers in
regression)
For classification, labels must correspond to classes.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator and the local variables of
``_fit_stages`` as keyword arguments ``callable(i, self,
locals())``. If the callable returns ``True`` the fitting procedure
is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspection, and
            snapshotting.
Returns
-------
self : object
Returns self.
"""
# if not warmstart - clear the estimator state
if not self.warm_start:
self._clear_state()
# Check input
X, y = check_X_y(X, y, dtype=DTYPE)
n_samples, self.n_features = X.shape
if sample_weight is None:
sample_weight = np.ones(n_samples, dtype=np.float32)
else:
sample_weight = column_or_1d(sample_weight, warn=True)
check_consistent_length(X, y, sample_weight)
y = self._validate_y(y)
random_state = check_random_state(self.random_state)
self._check_params()
if not self._is_initialized():
# init state
self._init_state()
# fit initial model - FIXME make sample_weight optional
self.init_.fit(X, y, sample_weight)
# init predictions
y_pred = self.init_.predict(X)
begin_at_stage = 0
else:
# add more estimators to fitted model
# invariant: warm_start = True
if self.n_estimators < self.estimators_.shape[0]:
raise ValueError('n_estimators=%d must be larger or equal to '
'estimators_.shape[0]=%d when '
'warm_start==True'
% (self.n_estimators,
self.estimators_.shape[0]))
begin_at_stage = self.estimators_.shape[0]
y_pred = self._decision_function(X)
self._resize_state()
# fit the boosting stages
n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
begin_at_stage, monitor)
# change shape of arrays after fit (early-stopping or additional ests)
if n_stages != self.estimators_.shape[0]:
self.estimators_ = self.estimators_[:n_stages]
self.train_score_ = self.train_score_[:n_stages]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n_stages]
return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
begin_at_stage=0, monitor=None):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples, ), dtype=np.bool)
n_inbag = max(1, int(self.subsample * n_samples))
loss_ = self.loss_
# init criterion and splitter
criterion = FriedmanMSE(1)
splitter = PresortBestSplitter(criterion,
self.max_features_,
self.min_samples_leaf,
self.min_weight_fraction_leaf,
random_state)
if self.verbose:
verbose_reporter = VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
# perform boosting iterations
i = begin_at_stage
for i in range(begin_at_stage, self.n_estimators):
# subsampling
if do_oob:
sample_mask = _random_sample_mask(n_samples, n_inbag,
random_state)
# OOB score before adding this stage
old_oob_score = loss_(y[~sample_mask],
y_pred[~sample_mask],
sample_weight[~sample_mask])
# fit next stage of trees
y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
sample_mask, criterion, splitter,
random_state)
# track deviance (= loss)
if do_oob:
self.train_score_[i] = loss_(y[sample_mask],
y_pred[sample_mask],
sample_weight[sample_mask])
self.oob_improvement_[i] = (old_oob_score -
loss_(y[~sample_mask], y_pred[~sample_mask],
sample_weight[~sample_mask]))
else:
# no need to fancy index w/ no subsampling
self.train_score_[i] = loss_(y, y_pred, sample_weight)
if self.verbose > 0:
verbose_reporter.update(i, self)
if monitor is not None:
early_stopping = monitor(i, self, locals())
if early_stopping:
break
return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
# for use in inner loop, not raveling the output in single-class case,
# not doing input validation.
score = self._init_decision_function(X)
predict_stages(self.estimators_, X, self.learning_rate, score)
return score
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each iteration.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification are special cases with
``k == 1``, otherwise ``k==n_classes``.
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._init_decision_function(X)
for i in range(self.estimators_.shape[0]):
predict_stage(self.estimators_, i, X, self.learning_rate, score)
yield score
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
def _validate_y(self, y):
self.n_classes_ = 1
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
"""Gradient Boosting for classification.
GB builds an additive model in a
forward stage-wise fashion; it allows for the optimization of
arbitrary differentiable loss functions. In each stage ``n_classes_``
regression trees are fit on the negative gradient of the
binomial or multinomial deviance loss function. Binary classification
is a special case where only a single regression tree is induced.
Parameters
----------
loss : {'deviance', 'exponential'}, optional (default='deviance')
loss function to be optimized. 'deviance' refers to
deviance (= logistic regression) for classification
with probabilistic outputs. For loss 'exponential' gradient
boosting recovers the AdaBoost algorithm.
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
See also
--------
sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
AdaBoostClassifier
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('deviance', 'exponential')
def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, verbose=0,
max_leaf_nodes=None, warm_start=False):
super(GradientBoostingClassifier, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def _validate_y(self, y):
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def predict(self, X):
"""Predict class for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
        y : array of shape = [n_samples]
The predicted values.
"""
score = self.decision_function(X)
decisions = self.loss_._score_to_decision(score)
return self.classes_.take(decisions, axis=0)
def staged_predict(self, X):
"""Predict class at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for score in self.staged_decision_function(X):
decisions = self.loss_._score_to_decision(score)
yield self.classes_.take(decisions, axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Raises
------
AttributeError
If the ``loss`` does not support probabilities.
Returns
-------
        p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
score = self.decision_function(X)
try:
return self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
def staged_predict_proba(self, X):
"""Predict class probabilities at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
try:
for score in self.staged_decision_function(X):
yield self.loss_._score_to_proba(score)
except NotFittedError:
raise
except AttributeError:
raise AttributeError('loss=%r does not support predict_proba' %
self.loss)
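# --- Hedged usage sketch (illustration only; not part of scikit-learn) ---
# Minimal end-to-end use of the classifier defined above on a toy two-class
# problem.  Values printed are indicative, not exact.
def _demo_gradient_boosting_classifier():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 4)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    clf = GradientBoostingClassifier(n_estimators=50, learning_rate=0.1,
                                     max_depth=2, random_state=0)
    clf.fit(X, y)
    print(clf.predict(X[:5]))
    print(clf.predict_proba(X[:5])[:, 1])
    # staged_predict yields one prediction array per boosting stage:
    print(len(list(clf.staged_predict(X[:5]))))   # 50 stages here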
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
"""Gradient Boosting for regression.
GB builds an additive model in a forward stage-wise fashion;
it allows for the optimization of arbitrary differentiable loss functions.
In each stage a regression tree is fit on the negative gradient of the
given loss function.
Parameters
----------
loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
loss function to be optimized. 'ls' refers to least squares
regression. 'lad' (least absolute deviation) is a highly robust
loss function solely based on order information of the input
variables. 'huber' is a combination of the two. 'quantile'
allows quantile regression (use `alpha` to specify the quantile).
learning_rate : float, optional (default=0.1)
learning rate shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : integer, optional (default=3)
maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. If smaller than 1.0 this results in Stochastic Gradient
Boosting. `subsample` interacts with the parameter `n_estimators`.
Choosing `subsample < 1.0` leads to a reduction of variance
and an increase in bias.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
alpha : float (default=0.9)
The alpha-quantile of the huber loss function and the quantile
loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
init : BaseEstimator, None, optional (default=None)
An estimator object that is used to compute the initial
predictions. ``init`` has to provide ``fit`` and ``predict``.
If None it uses ``loss.init_estimator``.
verbose : int, default: 0
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, default: False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
loss_ : LossFunction
The concrete ``LossFunction`` object.
`init` : BaseEstimator
The estimator that provides the initial predictions.
Set via the ``init`` argument or ``loss.init_estimator``.
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
See also
--------
DecisionTreeRegressor, RandomForestRegressor
References
----------
J. Friedman, Greedy Function Approximation: A Gradient Boosting
Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
J. Friedman, Stochastic Gradient Boosting, 1999
T. Hastie, R. Tibshirani and J. Friedman.
Elements of Statistical Learning Ed. 2, Springer, 2009.
"""
_SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')
def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
subsample=1.0, min_samples_split=2,
min_samples_leaf=1, min_weight_fraction_leaf=0.,
max_depth=3, init=None, random_state=None,
max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
super(GradientBoostingRegressor, self).__init__(
loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_depth=max_depth, init=init, subsample=subsample,
max_features=max_features,
random_state=random_state, alpha=alpha, verbose=verbose,
max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
return self.decision_function(X).ravel()
def staged_predict(self, X):
"""Predict regression target at each stage for X.
        This method allows monitoring (i.e. determining the error on a test set)
after each stage.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array of shape = [n_samples]
The predicted value of the input samples.
"""
for y in self.staged_decision_function(X):
yield y.ravel()
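# --- Hedged usage sketch (illustration only; not part of scikit-learn) ---
# Fitting the regressor above with the quantile loss yields conditional
# quantile estimates; `alpha` selects which quantile is targeted.
def _demo_gradient_boosting_quantiles():
    rng = np.random.RandomState(0)
    X = rng.uniform(0, 10, size=(300, 1))
    y = np.sin(X[:, 0]) + rng.normal(scale=0.3, size=300)
    upper = GradientBoostingRegressor(loss='quantile', alpha=0.9,
                                      n_estimators=100, max_depth=2,
                                      random_state=0).fit(X, y)
    lower = GradientBoostingRegressor(loss='quantile', alpha=0.1,
                                      n_estimators=100, max_depth=2,
                                      random_state=0).fit(X, y)
    # The 0.9-quantile curve should sit above the 0.1-quantile curve for
    # the large majority of samples.
    print(np.mean(upper.predict(X) > lower.predict(X)))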
| bsd-3-clause |
swirlingsand/self-driving-car-nanodegree-nd013 | p3-behavioral-cloning-project/model.py | 1 | 17800 | import tensorflow as tf
from keras.models import Sequential
from keras.layers.core import Dense, Flatten, Dropout, Lambda
from keras.layers.convolutional import Convolution2D
from keras.layers.advanced_activations import ELU
from keras.optimizers import Adam
import numpy as np
import cv2
import matplotlib.pyplot as plt
from pandas.io.parsers import read_csv
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
tf.python.control_flow_ops = tf
print('Modules loaded.')
"""
Hyperparameters
"""
# Neural Network Training
EPOCHS = 12
SAMPLES_EPOCH = 10000
VALIDATION_SAMPLES = 3000
LEARNING_RATE = .0001
BATCH_SIZE = 2 # Size of batch
# 3*BATCH_SIZE*FEATURE_GENERATION_MULTIPLE == total_batch size
# Recovery control
# Float, handled as absolute value, valid range from 0 -> 1
RECOVERY_OFFSET = .25
# Preprocessing
FEATURE_GENERATION_MULTIPLE = 1
# Data balancing
UNIFORM_TEST_MAX = .7  # Upper bound of the uniform draw used to accept non-zero angles
ZEROS = .50  # Probability of drawing a zero-angle sample; a higher value means more zeros
# NOTE as we add (+/-) for recover angles, the net number of zeros
# will be ~ ZEROS / 3 ie .5 zeros setting will result in ~.16
# The program relies on a variety of random functions
# For detailed testing, use the seed value as needed
# A seed of 8373 should return 91 on the test below
SEED = 8373
custom_random = np.random.RandomState(seed=SEED)
print(custom_random.randint(0, 100))
"""
Test suite
"""
# run_once credit
# http://stackoverflow.com/questions/4103773/efficient-way-of-having-a-function-only-execute-once-in-a-loop
def run_once(f):
def wrapper(*args, **kwargs):
if not wrapper.has_run:
wrapper.has_run = True
return f(*args, **kwargs)
wrapper.has_run = False
return wrapper
@run_once
def check_Shape(check_text, X_train, y_train):
print(check_text, X_train.shape, y_train.shape)
# TODO add further testing functions here
"""
Pre processing pipeline
"""
# Track angles is used to help keep track of angles generated throughout
track_angles = []
def augment_brightness_camera_images(image):
"""
Purpose: Adjust brightness of image
    Inputs: A 3D matrix, generally in the form [x, y, colours]
Outputs: Same
"""
# credit https://medium.com/@vivek.yadav
image = image.astype(np.uint8)
image1 = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
random_bright = .25 + np.random.uniform()
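    # random_bright lies in [0.25, 1.25), so the V channel is darkened most of
    # the time and only occasionally brightened.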
# print(random_bright)
image1[:, :, 2] = image1[:, :, 2] * random_bright
image1 = cv2.cvtColor(image1, cv2.COLOR_HSV2RGB)
return image1
def transform_image(img, ang_range, shear_range, trans_range):
'''
This function transforms images to generate new images.
The function takes in following arguments,
1- Image
2- ang_range: Range of angles for rotation
3- shear_range: Range of values to apply affine transform to
4- trans_range: Range of values to apply translations over.
A Random uniform distribution is used to generate different parameters
for transformation
'''
# Rotation
    ang_rot = ang_range * np.random.uniform() - ang_range / 2  # uniform in [-ang_range/2, ang_range/2)
# updated to reflect gray pipeline
rows, cols, ch = img.shape
Rot_M = cv2.getRotationMatrix2D((cols / 2, rows / 2), ang_rot, 1)
# Translation
tr_x = trans_range * np.random.uniform() - trans_range / 2
tr_y = trans_range * np.random.uniform() - trans_range / 2
Trans_M = np.float32([[1, 0, tr_x], [0, 1, tr_y]])
# Shear
pts1 = np.float32([[5, 5], [20, 5], [5, 20]])
pt1 = 5 + shear_range * np.random.uniform() - shear_range / 2
pt2 = 20 + shear_range * np.random.uniform() - shear_range / 2
pts2 = np.float32([[pt1, 5], [pt2, pt1], [5, pt2]])
shear_M = cv2.getAffineTransform(pts1, pts2)
img = cv2.warpAffine(img, Rot_M, (cols, rows))
img = cv2.warpAffine(img, Trans_M, (cols, rows))
img = cv2.warpAffine(img, shear_M, (cols, rows))
# added for brightness
img = augment_brightness_camera_images(img)
return img
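# Example call (matching how generate_features uses this function below):
#   augmented = transform_image(img, ang_range=10, shear_range=1, trans_range=1)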
def balance_data(data):
"""
Purpose: Balance the data given a random distribution
Inputs: A CSV file
Outputs: A line of the data file
    This format allows for a significant amount of flexibility;
    for example, it takes data of arbitrary length
    and returns a randomized line, following the settings
    specified by the user. Additionally, the user
    could fairly easily change "uniform" to another
    numpy distribution to test how different distributions
    affect results.
"""
uniform_test_max = UNIFORM_TEST_MAX
max_tries = 1 # This is a starting value for a timeout function
zero_flag = False
data_length = len(data)
# Test if zero is less than flag set in settings
if custom_random.random_sample(1) <= ZEROS:
zero_flag = True
# Main loop entry
    while True:
        max_tries += 1  # count attempts so the timeout check below can fire
# This block performs balance test and selects steering angle
uniform_test = (custom_random.uniform(0, uniform_test_max))
line_number = custom_random.randint(len(data))
line = data[line_number]
steering_angle = line[3]
# Return steering angle if zero flag is true, else resume loop
if zero_flag is True:
if steering_angle == 0:
return line
else:
continue
else:
# Handle non-zero angles
if steering_angle != 0 and abs(steering_angle) <= uniform_test:
return line
else: # Handle exceptions
if max_tries > data_length:
print("Exceeded search time, skipping angle:",
steering_angle, "Data length:", data_length)
return line
else:
continue
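# Illustrative usage sketch (mirrors the calls made in process_line and
# generate_arrays_from_file below; assumes the standard driving_log.csv column
# order of centre/left/right image paths followed by the steering angle):
#   data = read_csv('driving_log.csv').values
#   line = balance_data(data)  # one balanced row, with line[3] the steering angle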
def recovery(line):
"""
Purpose: Create a "recovery" angle based off left/right images
Inputs: A line, preferably given by balance_data()
Outputs: Y_train, a [3, 1] matrix
"""
y_train = np.empty([0, 1]) # Create a new placeholder matrix
y_single_sample = line[3] # Select a line from the data
y_single_sample = round(y_single_sample, 3) # Round sample to smooth data
# print(y_single_sample)
assert type(y_single_sample) == float
# print(type(y_single_sample))
new_labels = []
for i in range(3):
# left angles
if i == 1:
# print("Left", line[i]) #Refactor to an ASSERT
steering_adjust = +RECOVERY_OFFSET
# print(type(steering_adjust))
steering_correction = min(
1, y_single_sample + steering_adjust)
assert steering_correction != 1.01, steering_correction != -1
# print(steering_correction)
# Right angles
elif i == 2:
# print("Right", line[i])
steering_adjust = -RECOVERY_OFFSET
steering_correction = max(-1,
y_single_sample + steering_adjust)
assert steering_correction != -1.01, steering_correction != +1
# Center angles
elif i == 0:
# print("Center", line[i])
steering_correction = y_single_sample
track_angles.append(steering_correction)
new_labels.append([steering_correction])
# print("new labels", new_labels)
y_train = np.append(y_train, new_labels, axis=0)
# print(y_train)
return y_train
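# Worked example (with RECOVERY_OFFSET = 0.25): a row whose centre angle is 0.10
# yields y_train = [[0.10], [0.35], [-0.15]]: centre, left (+offset) and
# right (-offset), clipped to [-1, 1] by the min/max calls above.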
def chop_images(line):
"""
Purpose: Discard less important information
Inputs: A line, preferably given by balance_data()
Outputs: X_train, a [3, 80, 320, 3] tensor
"""
X_train = np.empty([0, 80, 320, 3]) # Create a new placeholder tensor
for i in range(3):
imgname = line[i].strip() # Use strip to fix whitespace issue
X_single_sample = cv2.imread(imgname) # open image with CV2
        # IMPORTANT: CV2 will read the image in BGR colour space
# Uncomment to test x shape
# print("X original shape", X_single_sample.shape)
# Perform chop, 40% off the top, 10% off the bottom.
top = int(.4 * X_single_sample.shape[0])
bottom = int(.1 * X_single_sample.shape[0])
X_single_sample = X_single_sample[top:-bottom, :]
# print("X new shape", X_single_sample.shape)
# save images for visualization if required
# scipy.misc.imsave(
# 'figs/orginal' + str(np.random.randint(999)) + '.jpg', X_single_sample)
X_train = np.append(X_train, [X_single_sample], axis=0)
return X_train
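# With the simulator's 160x320 input images (an assumption implied by the
# [3, 80, 320, 3] shape above), the chop removes 64 rows from the top and
# 16 from the bottom, leaving the 80 road-level rows.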
def adjust_colours(X_train):
"""
Purpose: Change colour space to RGB
Inputs: X_train, a [3, 80, 320, 3] tensor
Outputs: X_train, a [3, 80, 320, 3] tensor
"""
for feature in X_train:
feature = feature.astype(np.uint8) # fix error 215
feature = cv2.cvtColor(feature, cv2.COLOR_BGR2RGB)
# scipy.misc.imsave(
#'figs2/adjust-colours' + str(np.random.randint(10000)) + '.jpg', feature)
return X_train
def flip_images(X_train, y_train):
"""
Purpose: Flip images left to right and invert angles
This is to help the model generalize better.
Inputs: X_train, a [3, 80, 320, 3] tensor and y_train, a [3, 1] matrix
Outputs: X_train, a [3, 80, 320, 3] tensor and y_train, a [3, 1] matrix
"""
    X_train = np.array([np.fliplr(feature) for feature in X_train])
    y_train = -y_train  # invert steering angles to match the mirrored images
    return X_train, y_train
def generate_features(X_train, y_train):
"""
Purpose: Generate new features
Inputs: X_train, a [3, 80, 320, 3] tensor and y_train, a [3, 1] matrix
Outputs: X_train, a [ZZZ, 80, 320, 3] tensor and y_train, a [ZZZ, 1] matrix
Where ZZZ = (FEATURE_GENERATION_MULTIPLE * 3) + 3
"""
new_features = [] # Create placeholder arrays
new_labels = []
for i in range(FEATURE_GENERATION_MULTIPLE):
for feature in X_train:
            # Settings for the transform_image function
feature = transform_image(feature, 10, 1, 1)
# credit https://github.com/navoshta/behavioral-cloning
# switched to used custom random function
# Adding shadows
h, w = feature.shape[0], feature.shape[1]
[x1, x2] = custom_random.choice(w, 2, replace=False)
k = h / (x2 - x1)
b = - k * x1
for j in range(h):
c = int((j - b) / k)
feature[j, :c, :] = (feature[j, :c, :] * .5).astype(np.int32)
# scipy.misc.imsave('figs2/gen-features' + str(np.random.randint(10000)) + '.jpg', feature)
new_features.append(feature)
for label in y_train:
new_labels.append(label)
track_angles.append(label)
X_train = np.append(X_train, new_features, axis=0)
y_train = np.append(y_train, new_labels, axis=0)
return X_train, y_train
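# With the default FEATURE_GENERATION_MULTIPLE = 1 this returns 6 samples:
# the 3 originals plus 3 brightness/shadow/affine-augmented copies.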
def process_line(data):
"""
    Purpose: Main entry point for processing initial data
Inputs: A CSV file of data.
Outputs: X_train, a [3, 80, 320, 3] tensor and y_train, a [3, 1] matrix
"""
# Please see functions above for further definition
balanced_line = balance_data(data)
X_train = chop_images(balanced_line)
X_train = adjust_colours(X_train)
y_train = recovery(balanced_line)
if custom_random.random_sample(1) <= .6:
        X_train, y_train = flip_images(X_train, y_train)
# Check_text = "After image generation, shapes: "
# check_Shape(Check_text, X_train, y_train)
return X_train, y_train
def generate_arrays_from_file(path):
"""
Purpose: Yield tensor batches to fit_generator function
Inputs: A file path
Outputs: X_train, a [AHH, 80, 320, 3] tensor and y_train, a [AHH, 1] matrix
Where AHH = ((FEATURE_GENERATION_MULTIPLE * 3) + 3) * BATCH_SIZE
"""
batch_tracker = 0
while 1:
for batch in range(BATCH_SIZE):
# load the labels and file paths to images
data = read_csv(path).values
X_train, y_train = process_line(data) # Process existing data
X_train, y_train = generate_features(X_train, y_train)
X_train = np.append(X_train, X_train, axis=0)
y_train = np.append(y_train, y_train, axis=0)
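            # Note: X_train/y_train are rebuilt on every pass of this loop, so the
            # np.append calls above simply duplicate the generated samples; with the
            # default settings the batch yielded below holds 12 samples.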
# print(batch_tracker)
batch_tracker = batch_tracker + 1
Check_text = "Batch shape: "
check_Shape(Check_text, X_train, y_train)
yield X_train, y_train
"""
Model architecture
This model generally follows the Nvidia architecture as described here:
https://arxiv.org/abs/1604.07316
Using glorot_uniform based on this research:
https://github.com/fchollet/keras/issues/52
Decided to use exponential linear units for activation based on
https://github.com/commaai/research/blob/master/train_steering_model.py
and further reading of
https://arxiv.org/abs/1511.07289
Dropout is used to help generalize the network.
"""
# Use this control to easily test different initializations.
init_type = "glorot_uniform"
border_mode_type = "valid"
row, col, ch = 80, 320, 3
model = Sequential()
# Normalization
# Can use a negative range thanks to ELU
model.add(Lambda(lambda X_train: X_train / 127.5 - 1,
input_shape=(row, col, ch),
output_shape=(row, col, ch)))
model.add(Convolution2D(24, 5, 5,
subsample=(2, 2),
border_mode=border_mode_type,
init=init_type))
model.add(ELU())
model.add(Convolution2D(36, 5, 5,
subsample=(2, 2),
border_mode=border_mode_type,
init=init_type))
model.add(ELU())
model.add(Convolution2D(48, 5, 5,
subsample=(2, 2),
border_mode=border_mode_type,
init=init_type))
model.add(ELU())
model.add(Convolution2D(64, 3, 3,
subsample=(2, 2),
border_mode=border_mode_type,
init=init_type))
model.add(ELU())
model.add(Convolution2D(64, 3, 3,
subsample=(2, 2),
border_mode=border_mode_type,
init=init_type))
model.add(ELU())
model.add(Flatten())
model.add(Dropout(.2))
model.add(Dense(1164, init=init_type))
model.add(ELU())
model.add(Dropout(.3))
model.add(Dense(100, init=init_type))
model.add(ELU())
model.add(Dense(50, init=init_type))
model.add(ELU())
model.add(Dense(10, init=init_type))
model.add(ELU())
model.add(Dense(1))
# Using the ADAM optimizer with a custom starting learning rate
optimizer_settings = Adam(lr=LEARNING_RATE)
# As this is a continuous regression problem we are using mean squared error
model.compile(optimizer=optimizer_settings, loss='mse')
# Early stopping to help test longer epochs
early_stopping = EarlyStopping(monitor='val_loss', patience=2)
# saves the model weights after each epoch if the validation loss decreased
# https://keras.io/callbacks/#create-a-callback
checkpointer = ModelCheckpoint(
filepath="temp-model/weights.hdf5", verbose=1, save_best_only=True)
if __name__ == "__main__":
print("Training")
"""
Train architecture
Using fit_generator allows the CPU to process images "on demand"
while the GPU is processing the network.
    We are using a validation set to help understand the network's
ability to generalize.
"""
history = model.fit_generator(
generate_arrays_from_file('driving_log.csv'),
samples_per_epoch=SAMPLES_EPOCH, nb_epoch=EPOCHS, verbose=2,
callbacks=[early_stopping, checkpointer],
validation_data=generate_arrays_from_file('validation_log2.csv'),
nb_val_samples=VALIDATION_SAMPLES)
"""
Save model
"""
model.save('my_model.h5')
print("Model saved.")
# This presents a summary of stats from Keras
model.summary()
print("Complete.")
plt_number = custom_random.randint(0, 10000)
"""
Graph model training and validation loss performance.
"""
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
fig2 = plt.gcf()
plt.show()
fig2.savefig('testing-figures/loss-history' + str(plt_number) + '.png')
"""
Graph steering angles used by the model
"""
track_angles = np.asarray(track_angles)
print("Length of track angles: ", len(track_angles))
plt.hist(track_angles, bins='auto')
plt.title("Gaussian Histogram")
plt.xlabel("Value")
plt.ylabel("Frequency")
fig = plt.gcf()
plt.show()
fig.savefig('testing-figures/angle-histogram' + str(plt_number) + '.png')
print(min(track_angles))
print(max(track_angles))
non_zero_count = (np.count_nonzero(track_angles))
zeros_counter = len(track_angles) - non_zero_count
np_unique_angles, np_unique_counts = np.unique(
track_angles, return_counts=True)
# Provide stats
print("Non zero angles:", non_zero_count)
print("Number of zeros: ", zeros_counter)
print("Percent zero angle", zeros_counter / len(track_angles))
print("Number of original zeros (Before Recovery +/-): ", zeros_counter * 3)
print("Unique angles, non recovery:", len(np.unique(track_angles)) / 3)
print("Non zero, non recovery angles:", len(
track_angles) - zeros_counter * 3)
print(np_unique_angles, np_unique_counts)
plt.hist(np.unique(track_angles), bins='auto')
plt.title("Gaussian Histogram")
plt.xlabel("Value")
plt.ylabel("Frequency")
fig = plt.gcf()
plt.show()
fig.savefig('testing-figures/unique-angles-histogram' +
str(plt_number) + '.png')
| mit |
asantinc/translation-decoding | aligner/q3.py | 1 | 3359 | #!/usr/bin/env python
import optparse
import sys
import operator
import math
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
optparser = optparse.OptionParser()
optparser.add_option("-b", "--bitext", dest="bitext", default="data/dev-test-train.de-en", help="Parallel corpus (default data/dev-test-train.de-en)")
optparser.add_option("-t", "--threshold", dest="threshold", default=0.5, type="float", help="Threshold for aligning with Dice's coefficient (default=0.5)")
optparser.add_option("-n", "--num_sentences", dest="num_sents", default=200, type="int", help="Number of sentences to use for training and alignment")
optparser.add_option("-i", "--max_iters", dest="max_iters", default=10, type="int", help="Number of iterations in EM algorithm")
(opts, _) = optparser.parse_args()
bitext = [[sentence.strip().split() for sentence in pair.split(' ||| ')] for pair in open(opts.bitext)][:opts.num_sents]
# initialise the parameters P(f|e)
theta = defaultdict(float)
french_dict = defaultdict(int)
english_dict = defaultdict(int)
french = set()
for (f,e) in bitext:
french |= set(f)
for (f,e) in bitext:
for fw in f:
for ew in e:
theta[(fw,ew)] = 1.00/len(french)
# Count the number of distinct words in each language
for (f,e) in bitext:
for fw in f:
french_dict[fw] += 1
for ew in e:
english_dict[ew]+= 1
# Find the 5 most common and least common English words
sorted_french = sorted(french_dict.items(), key=operator.itemgetter(1))
sorted_english = sorted(english_dict.items(), key=operator.itemgetter(1))
bot_5 = sorted_english[:5]
top_5 = sorted_english[-5:]
ind = int(round(len(sorted_english)/2))
mid_5 = sorted_english[ind:(ind+5)]
print 'French words: ' + str(len(sorted_french)) + '\n'
# start the counter
k = 0
while k < opts.max_iters:
# increment
k += 1
# restart the counts
fe_count = defaultdict(float)
e_count = defaultdict(float)
# compute expected counts
for (n, (f,e)) in enumerate(bitext):
for fw in f:
# initialise and compute normalisation constant
Z = 0
for ew in e:
th = theta[(fw,ew)]
Z += th
for ew in e:
th = theta[(fw,ew)]
# compute expected count
c = th/Z
# increment the counts by this amount
fe_count[(fw,ew)] += c
e_count[ew] += c
if n % 100 == 0:
sys.stderr.write(".")
for (fw,ew) in fe_count.keys():
# M-step: recalculate the parameter P(f|e)
theta[(fw,ew)] = math.exp(math.log(fe_count[(fw,ew)]) - math.log(e_count[ew]))
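# The loop above is IBM Model 1 style EM:
#   E-step: c(f,e) += t(f|e) / sum over e' in the sentence of t(f|e')
#   M-step: t(f|e)  = c(f,e) / c(e)
# The exp/log form used for the M-step is the same division done in log space,
# presumably to avoid numerical underflow.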
#countryF = open('q3/morph-country.csv', 'w')
countryL = []
for fw in sorted_french:
th = theta[(fw[0], 'country')]
countryL.append((fw, th))
#countryF.write('%.20f, ' % (th))
#countryF.write('\n')
sorted_country = sorted(countryL, key=lambda tup: tup[1], reverse = True)
print sorted_country[:10]
#countriesF = open('q3/morph-countries.csv', 'w')
countriesL = []
for fw in sorted_french:
th = theta[(fw[0], 'countries')]
countriesL.append((fw, th))
#countriesF.write('%.20f, ' % (th))
#countriesF.write('\n')
sorted_countries = sorted(countriesL, key=lambda tup: tup[1], reverse = True)
print sorted_countries[:10]
| mit |
lukovkin/ufcnn-keras | models/rl/DataStore.py | 1 | 8165 |
import numpy as np
import pandas as pd
import os
import glob
import re
import time
import datetime
import itertools
class DataStore(object):
""" Load and Store Data from the Trading Competition """
def __init__(self, sequence_length=500, features_list=[1,2,3,4], path='./training_data_large/', training_days=0, testing_days = 0, mean = None, std = None):
"""
        if data is already stored in pickle format, read it from disk, otherwise create it
path ... where to find the training data to load
training_days ... how many days are needed for training
testing_days ... how many days are needed for testing, is 0 if only training is used
        if testing_days != 0, then test data will be loaded
"""
self.sequence_length = sequence_length
self.Xdf_array_list = []
self.XdfBidAsk_array_list = []
self.Xdf_array_day = []
self.features_length = len(features_list) + 2 # Bid & Ask get appended onto the features list
if testing_days == 0:
if mean is not None or std is not None:
print(" When specifiying traiing days only, mean and std must not be given. Aborting.")
raise ValueError
if testing_days != 0:
if mean is None or std is None:
print(" When specifiying testing days, mean and std must be given. Aborting.")
raise ValueError
## Are there already files given
output_file_name = path + "/TradcomSave"+str(training_days)+"_"+str(testing_days)+".pickle"
Xdf = pd.DataFrame()
if os.path.isfile(output_file_name):
Xdf = pd.read_pickle(output_file_name)
else:
# We need to create the file ourselves...
file_list = sorted(glob.glob(path+'/prod_data_*v.txt'))
if len(file_list) == 0:
print ("Files "+path+"prod_data_*txt are needed. Please copy them into "+path+". Aborting.")
raise ValueError
if testing_days != 0:
start = training_days
end = training_days + testing_days
else:
start = 0
end = training_days
for filename in file_list[start:end]:
print("Working on Input file: ",filename)
# get the date...
r = re.compile('^\D*(\d*)\D*', re.UNICODE)
date = re.search(r, filename).group(1)
print("Date is ",date)
date_ux = time.mktime(datetime.datetime.strptime(date,"%Y%m%d").timetuple())
# load dataframes and reindex
Xdf_loc = pd.read_csv(filename, sep=" ", header = None,)
# print(Xdf_loc.iloc[:3])
Xdf_loc['Milliseconds'] = Xdf_loc[0]
Xdf_loc['Date'] = pd.to_datetime(date_ux*1000*1000*1000)
Xdf_loc = Xdf_loc.set_index(['Date', 'Milliseconds'], append=False, drop=True)
# print(Xdf_loc.iloc[:3])
Xdf = pd.concat([Xdf, Xdf_loc])
#print(Xdf.index[0])
#print(Xdf.index[-1])
## Store all the data in the file
Xdf.to_pickle(output_file_name)
#select by features_list
colgroups = [[2, 4], [3, 5]]
Xdf = Xdf[features_list]
# keep the bid and ask unmodified
unmodified_group = [2,4]
self.Xdf, self.mean, self.std = self.standardize_inputs(Xdf, colgroups=colgroups, mean=mean, std=std, unmodified_group=unmodified_group)
self.XdfBidAsk = self.Xdf[[2,4,'U2','U4']]
# split the Xdf along the days...
#print (self.Xdf)
for date_idx in self.Xdf.index.get_level_values(0).unique():
self.Xdf_array_list.append(self.Xdf.loc[date_idx].values)
self.XdfBidAsk_array_list.append(self.XdfBidAsk.loc[date_idx].values)
self.Xdf_array_day.append(date_idx)
## TODO remove ?
self.Xdf['U2'] = 0.
self.Xdf['U4'] = 0.
def standardize_columns(self, colgroup):
"""
Standardize group of columns together
colgroup: Pandas.DataFrame
        returns: Pandas.DataFrames: column group standardized, mean of the colgroup, std deviation of the colgroup
"""
_me = np.mean(colgroup.values.flatten())
centered = colgroup.sub(_me)
me = pd.DataFrame(np.full(len(colgroup.columns),_me), index=colgroup.columns)
_st = np.std(colgroup.values.flatten())
standardized = centered.div(_st)
st = pd.DataFrame(np.full(len(colgroup.columns),_st), index=colgroup.columns)
return standardized, me, st
def standardize_inputs(self, Xdf, colgroups=None, mean=None, std=None, unmodified_group=None):
"""
Standardize input features.
Groups of features could be listed in order to be standardized together.
Xdf: Pandas.DataFrame
colgroups: list of lists of groups of features to be standardized together (e.g. bid/ask price, bid/ask size)
returns Xdf ...Pandas.DataFrame, mean ...Pandas.DataFrame, std ...Pandas.DataFrame
"""
new_unmod_group = []
for unmod in unmodified_group:
# copy the unmodified column group
new_name = 'U'+str(unmod)
Xdf[new_name] = Xdf[unmod]
new_unmod_group.append(new_name)
df = pd.DataFrame()
me = pd.DataFrame()
st = pd.DataFrame()
for colgroup in colgroups:
_df,_me,_st = self.standardize_columns(Xdf[colgroup])
# if mean & std are given, do not multiply with colgroup mean
if mean is not None and std is not None:
_df = Xdf[colgroup]
df = pd.concat([df, _df], axis=1)
me = pd.concat([me, _me])
st = pd.concat([st, _st])
# _temp_list = list(itertools.chain.from_iterable(colgroups))
separate_features = [col for col in Xdf.columns if col not in list(itertools.chain.from_iterable(colgroups))]
if mean is None and std is None:
_me = Xdf[separate_features].mean()
_me[new_unmod_group] = 0.
_df = Xdf[separate_features].sub(_me)
_st = Xdf[separate_features].std()
_st[new_unmod_group] = 1.
_df = _df[separate_features].div(_st)
else:
_df = Xdf[separate_features]
df = pd.concat([df, _df], axis=1)
me = pd.concat([me, _me])
st = pd.concat([st, _st])
me = pd.Series(me[0])
st = pd.Series(st[0])
if mean is not None and std is not None:
mean[new_unmod_group] = 0.
std[new_unmod_group] = 1.
df = df.sub(mean)
df = df.div(std)
return df, me, st
def get_number_days(self):
"""
number of days
"""
return len(self.Xdf_array_list)
def get_day_length(self, day_index=0):
"""
Find out how many index entries are available for this day
"""
arr = self.Xdf_array_list[day_index]
return len(arr)
def get_sequence(self, day_index=0, line_id=None):
"""
get the last sequence_length elements from the Xdf by the index id
"""
#return self.Xdf.ix[id-self.sequence_length:id].values
        if day_index >= len(self.Xdf_array_list):
raise ValueError
arr = self.Xdf_array_list[day_index]
return arr[line_id-self.sequence_length+1:line_id+1]
def get_bid_ask(self, day_index=0, line_id = None):
"""
returns Bid normalized, Bid, Ask normalized, Ask
"""
arr = self.XdfBidAsk_array_list[day_index]
return arr[line_id][0], arr[line_id][2], arr[line_id][1], arr[line_id][3]
def get_day(self, day_index=0):
"""
get the last sequence_length elements from the Xdf by the index id
"""
return self.Xdf_array_day[day_index]
def get_features_length(self):
return self.features_length
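# Illustrative usage sketch (day counts and the path are assumptions, not taken
# from the original project):
#   train_store = DataStore(training_days=5, path='./training_data_large/')
#   test_store = DataStore(training_days=5, testing_days=2,
#                          mean=train_store.mean, std=train_store.std,
#                          path='./training_data_large/')
#   seq = train_store.get_sequence(day_index=0, line_id=train_store.sequence_length)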
| mit |
ARudiuk/mne-python | examples/inverse/plot_covariance_whitening_dspm.py | 9 | 7201 | # doc:slow-example
"""
===================================================
Demonstrate impact of whitening on source estimates
===================================================
This example demonstrates the relationship between the noise covariance
estimate and the MNE / dSPM source amplitudes. It computes source estimates for
the SPM faces data and compares proper regularization with insufficient
regularization based on the methods described in [1]_. The example demonstrates
that improper regularization can lead to overestimation of source amplitudes.
This example makes use of the previous, non-optimized code path that was used
before implementing the suggestions presented in [1]_. Please do not copy the
patterns presented here for your own analysis; this example is purely
illustrative.
.. note:: This example does quite a bit of processing, so even on a
fast machine it can take a couple of minutes to complete.
References
----------
.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG signals,
vol. 108, 328-342, NeuroImage.
"""
# Author: Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import numpy as np
from scipy.misc import imread
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import spm_face
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.cov import compute_covariance
print(__doc__)
##############################################################################
# Get data
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D.ds'
raw = io.read_raw_ctf(raw_fname % 1) # Take first run
# To save time and memory for this demo, we'll just use the first
# 2.5 minutes (all we need to get 30 total events) and heavily
# resample 480->60 Hz (usually you wouldn't do either of these!)
raw = raw.crop(0, 150.).load_data().resample(60, npad='auto')
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(1, None, method='iir', n_jobs=1)
events = mne.find_events(raw, stim_channel='UPPT001')
event_ids = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.2, 0.5
baseline = None # no baseline as high-pass is applied
reject = dict(mag=3e-12)
# Make source space
trans = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw-trans.fif'
src = mne.setup_source_space('spm', fname=None, spacing='oct6',
subjects_dir=subjects_dir, add_dist=False)
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(raw.info, trans, src, bem)
forward = mne.convert_forward_solution(forward, surf_ori=True)
del src
# inverse parameters
conditions = 'faces', 'scrambled'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = 'dSPM'
clim = dict(kind='value', lims=[0, 2.5, 5])
###############################################################################
# Estimate covariances
samples_epochs = 5, 15,
method = 'empirical', 'shrunk'
colors = 'steelblue', 'red'
evokeds = list()
stcs = list()
methods_ordered = list()
for n_train in samples_epochs:
# estimate covs based on a subset of samples
# make sure we have the same number of conditions.
events_ = np.concatenate([events[events[:, 2] == id_][:n_train]
for id_ in [event_ids[k] for k in conditions]])
epochs_train = mne.Epochs(raw, events_, event_ids, tmin, tmax, picks=picks,
baseline=baseline, preload=True, reject=reject)
epochs_train.equalize_event_counts(event_ids, copy=False)
assert len(epochs_train) == 2 * n_train
noise_covs = compute_covariance(
epochs_train, method=method, tmin=None, tmax=0, # baseline only
return_estimators=True) # returns list
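    # With return_estimators=True, compute_covariance returns one covariance per
    # requested method, ordered best-first by log-likelihood on held-out data;
    # the 'best' / 'worst' labels used in the plotting loop below rely on this.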
# prepare contrast
evokeds = [epochs_train[k].average() for k in conditions]
del epochs_train, events_
# do contrast
# We skip empirical rank estimation that we introduced in response to
# the findings in reference [1] to use the naive code path that
# triggered the behavior described in [1]. The expected true rank is
# 274 for this dataset. Please do not do this with your data but
# rely on the default rank estimator that helps regularizing the
# covariance.
stcs.append(list())
methods_ordered.append(list())
for cov in noise_covs:
inverse_operator = make_inverse_operator(evokeds[0].info, forward,
cov, loose=0.2, depth=0.8,
rank=274)
stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, "dSPM",
pick_ori=None) for e in evokeds)
stc = stc_a - stc_b
methods_ordered[-1].append(cov['method'])
stcs[-1].append(stc)
del inverse_operator, evokeds, cov, noise_covs, stc, stc_a, stc_b
del raw, forward # save some memory
##############################################################################
# Show the resulting source estimates
fig, (axes1, axes2) = plt.subplots(2, 3, figsize=(9.5, 6))
def brain_to_mpl(brain):
"""convert image to be usable with matplotlib"""
tmp_path = op.abspath(op.join(op.curdir, 'my_tmp'))
brain.save_imageset(tmp_path, views=['ven'])
im = imread(tmp_path + '_ven.png')
os.remove(tmp_path + '_ven.png')
return im
for ni, (n_train, axes) in enumerate(zip(samples_epochs, (axes1, axes2))):
# compute stc based on worst and best
ax_dynamics = axes[1]
for stc, ax, method, kind, color in zip(stcs[ni],
axes[::2],
methods_ordered[ni],
['best', 'worst'],
colors):
brain = stc.plot(subjects_dir=subjects_dir, hemi='both', clim=clim)
brain.set_time(175)
im = brain_to_mpl(brain)
brain.close()
del brain
ax.axis('off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.imshow(im)
ax.set_title('{0} ({1} epochs)'.format(kind, n_train * 2))
# plot spatial mean
stc_mean = stc.data.mean(0)
ax_dynamics.plot(stc.times * 1e3, stc_mean,
label='{0} ({1})'.format(method, kind),
color=color)
# plot spatial std
stc_var = stc.data.std(0)
ax_dynamics.fill_between(stc.times * 1e3, stc_mean - stc_var,
stc_mean + stc_var, alpha=0.2, color=color)
# signal dynamics worst and best
ax_dynamics.set_title('{0} epochs'.format(n_train * 2))
ax_dynamics.set_xlabel('Time (ms)')
ax_dynamics.set_ylabel('Source Activation (dSPM)')
ax_dynamics.set_xlim(tmin * 1e3, tmax * 1e3)
ax_dynamics.set_ylim(-3, 3)
ax_dynamics.legend(loc='upper left', fontsize=10)
fig.subplots_adjust(hspace=0.4, left=0.03, right=0.98, wspace=0.07)
fig.canvas.draw()
fig.show()
| bsd-3-clause |
chris-chris/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 82 | 6157 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
| apache-2.0 |
ChristianSch/skml | test/test_br.py | 1 | 1816 | from chai import Chai
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.metrics import hamming_loss
import numpy as np
import scipy.sparse as sp
from skml.problem_transformation import BinaryRelevance
from skml.datasets import load_dataset
X, y = load_dataset('yeast')
class TestBR(Chai):
def test_br_fit_predict(self):
clf = BinaryRelevance(RandomForestClassifier())
clf.fit(X, y)
y_pred = clf.predict(X)
hamming_loss(y, y_pred)
def test_br_pipeline(self):
pl = Pipeline([("br", BinaryRelevance(RandomForestClassifier()))])
pl.fit(X, y)
def test_br_gridsearch(self):
br = BinaryRelevance(RandomForestClassifier())
cv = GridSearchCV(br,
{'estimator__n_estimators': [10, 20]},
n_jobs=-1)
cv.fit(X, y)
def test_br_always_present(self):
# Test that br works with classes that are always present or absent.
clf = BinaryRelevance(RandomForestClassifier())
X_2 = np.array([[2, 3], [4, 0]])
y_2 = np.array([[1, 1], [1, 0]])
        clf.fit(X_2, y_2)
def test_br_predict_multi_instances(self):
clf = BinaryRelevance(RandomForestClassifier())
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(y_pred.shape[0] == y.shape[0])
def test_br_fit_predict_sparse(self):
# test fit/predict of sparse matrices
for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix,
sp.dok_matrix, sp.lil_matrix]:
clf = BinaryRelevance(RandomForestClassifier())
clf.fit(X, sparse(y))
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
| mit |