repo_name | path | copies | size | content | license
---|---|---|---|---|---
manashmndl/scikit-learn | sklearn/utils/tests/test_class_weight.py | 140 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
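# Worked sketch of the "balanced" heuristic asserted above, assuming the
# documented formula cw = n_samples / (n_classes * np.bincount(y)):
#   y = [2, 2, 2, 3, 3, 4] -> class_counts = [3, 2, 1], n_samples = 6
#   cw = 6 / (3 * [3, 2, 1]) = [0.667, 1.0, 2.0]
# so np.dot(cw, class_counts) = 2 + 2 + 2 = 6 == y.shape[0], and the weights
# grow as the classes get rarer (cw[0] < cw[1] < cw[2]).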
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" are invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
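# Note on the two multi-output assertions above: compute_sample_weight
# combines the per-output weights multiplicatively, and the second output
# column is a relabelled copy of the first, so the expected weights are
# simply squared (expected_auto ** 2 and expected_balanced ** 2).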
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
cython-testbed/pandas | pandas/tests/tslibs/test_liboffsets.py | 4 | 6794 | # -*- coding: utf-8 -*-
"""
Tests for helper functions in the cython tslibs.offsets
"""
from datetime import datetime
import pytest
from pandas import Timestamp
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import roll_qtrday
def test_get_lastbday():
dt = datetime(2017, 11, 30)
assert dt.weekday() == 3 # i.e. this is a business day
assert liboffsets.get_lastbday(dt.year, dt.month) == 30
dt = datetime(1993, 10, 31)
assert dt.weekday() == 6 # i.e. this is not a business day
assert liboffsets.get_lastbday(dt.year, dt.month) == 29
def test_get_firstbday():
dt = datetime(2017, 4, 1)
assert dt.weekday() == 5 # i.e. not a weekday
assert liboffsets.get_firstbday(dt.year, dt.month) == 3
dt = datetime(1993, 10, 1)
assert dt.weekday() == 4 # i.e. a business day
assert liboffsets.get_firstbday(dt.year, dt.month) == 1
def test_shift_month():
dt = datetime(2017, 11, 30)
assert liboffsets.shift_month(dt, 0, 'business_end') == dt
assert liboffsets.shift_month(dt, 0,
'business_start') == datetime(2017, 11, 1)
ts = Timestamp('1929-05-05')
assert liboffsets.shift_month(ts, 1, 'start') == Timestamp('1929-06-01')
assert liboffsets.shift_month(ts, -3, 'end') == Timestamp('1929-02-28')
assert liboffsets.shift_month(ts, 25, None) == Timestamp('1931-06-5')
# Try to shift to April 31, then shift back to Apr 30 to get a real date
assert liboffsets.shift_month(ts, -1, 31) == Timestamp('1929-04-30')
dt = datetime(2017, 11, 15)
assert liboffsets.shift_month(dt, 0, day_opt=None) == dt
assert liboffsets.shift_month(dt, 0, day_opt=15) == dt
assert liboffsets.shift_month(dt, 1,
day_opt='start') == datetime(2017, 12, 1)
assert liboffsets.shift_month(dt, -145,
day_opt='end') == datetime(2005, 10, 31)
with pytest.raises(ValueError):
liboffsets.shift_month(dt, 3, day_opt='this should raise')
def test_get_day_of_month():
# get_day_of_month is not directly exposed; we test it via roll_yearday
dt = datetime(2017, 11, 15)
with pytest.raises(ValueError):
# To hit the raising case we need month == dt.month and n > 0
liboffsets.roll_yearday(dt, n=3, month=11, day_opt='foo')
def test_roll_yearday():
# Copied from doctest examples
month = 3
day_opt = 'start' # `other` will be compared to March 1
other = datetime(2017, 2, 10) # before March 1
assert liboffsets.roll_yearday(other, 2, month, day_opt) == 1
assert liboffsets.roll_yearday(other, -7, month, day_opt) == -7
assert liboffsets.roll_yearday(other, 0, month, day_opt) == 0
other = Timestamp('2014-03-15', tz='US/Eastern') # after March 1
assert liboffsets.roll_yearday(other, 2, month, day_opt) == 2
assert liboffsets.roll_yearday(other, -7, month, day_opt) == -6
assert liboffsets.roll_yearday(other, 0, month, day_opt) == 1
month = 6
day_opt = 'end' # `other` will be compared to June 30
other = datetime(1999, 6, 29) # before June 30
assert liboffsets.roll_yearday(other, 5, month, day_opt) == 4
assert liboffsets.roll_yearday(other, -7, month, day_opt) == -7
assert liboffsets.roll_yearday(other, 0, month, day_opt) == 0
other = Timestamp(2072, 8, 24, 6, 17, 18) # after June 30
assert liboffsets.roll_yearday(other, 5, month, day_opt) == 5
assert liboffsets.roll_yearday(other, -7, month, day_opt) == -6
assert liboffsets.roll_yearday(other, 0, month, day_opt) == 1
def test_roll_qtrday():
other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday
for day_opt in ['start', 'end', 'business_start', 'business_end']:
# as long as (other.month % 3) != (month % 3), day_opt is irrelevant
month = 5 # (other.month % 3) < (month % 3)
assert roll_qtrday(other, 4, month, day_opt, modby=3) == 3
assert roll_qtrday(other, -3, month, day_opt, modby=3) == -3
month = 3 # (other.month % 3) > (month % 3)
assert roll_qtrday(other, 4, month, day_opt, modby=3) == 4
assert roll_qtrday(other, -3, month, day_opt, modby=3) == -2
month = 2
other = datetime(1999, 5, 31) # Monday
# has (other.month % 3) == (month % 3)
n = 2
assert roll_qtrday(other, n, month, 'start', modby=3) == n
assert roll_qtrday(other, n, month, 'end', modby=3) == n
assert roll_qtrday(other, n, month, 'business_start', modby=3) == n
assert roll_qtrday(other, n, month, 'business_end', modby=3) == n
n = -1
assert roll_qtrday(other, n, month, 'start', modby=3) == n + 1
assert roll_qtrday(other, n, month, 'end', modby=3) == n
assert roll_qtrday(other, n, month, 'business_start', modby=3) == n + 1
assert roll_qtrday(other, n, month, 'business_end', modby=3) == n
other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday
month = 4 # (other.month % 3) == (month % 3)
n = 2
assert roll_qtrday(other, n, month, 'start', modby=3) == n
assert roll_qtrday(other, n, month, 'end', modby=3) == n - 1
assert roll_qtrday(other, n, month, 'business_start', modby=3) == n - 1
assert roll_qtrday(other, n, month, 'business_end', modby=3) == n - 1
n = -1
assert roll_qtrday(other, n, month, 'start', modby=3) == n
assert roll_qtrday(other, n, month, 'end', modby=3) == n
assert roll_qtrday(other, n, month, 'business_start', modby=3) == n
assert roll_qtrday(other, n, month, 'business_end', modby=3) == n
other = Timestamp(2072, 10, 3, 6, 17, 18) # First businessday
month = 4 # (other.month % 3) == (month % 3)
n = 2
assert roll_qtrday(other, n, month, 'start', modby=3) == n
assert roll_qtrday(other, n, month, 'end', modby=3) == n - 1
assert roll_qtrday(other, n, month, 'business_start', modby=3) == n
assert roll_qtrday(other, n, month, 'business_end', modby=3) == n - 1
n = -1
assert roll_qtrday(other, n, month, 'start', modby=3) == n + 1
assert roll_qtrday(other, n, month, 'end', modby=3) == n
assert roll_qtrday(other, n, month, 'business_start', modby=3) == n
assert roll_qtrday(other, n, month, 'business_end', modby=3) == n
def test_roll_convention():
other = 29
before = 1
after = 31
n = 42
assert liboffsets.roll_convention(other, n, other) == n
assert liboffsets.roll_convention(other, n, before) == n
assert liboffsets.roll_convention(other, n, after) == n - 1
n = -4
assert liboffsets.roll_convention(other, n, other) == n
assert liboffsets.roll_convention(other, n, before) == n + 1
assert liboffsets.roll_convention(other, n, after) == n
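# A minimal reference sketch of the convention exercised above, inferred from
# these assertions rather than copied from the pandas implementation:
#
#     def _roll_convention_sketch(other, n, compare):
#         # a positive n rolls back when `other` falls before the comparison
#         # day; a negative n rolls forward when `other` falls after it
#         if n > 0 and other < compare:
#             n -= 1
#         elif n < 0 and other > compare:
#             n += 1
#         return n
#
# which reproduces every assertion in test_roll_convention above.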
| bsd-3-clause |
apavlo/h-store | graphs/eviction-overhead.py | 4 | 2079 | #!/usr/bin/env python
import os
import sys
import re
import logging
import fnmatch
import string
import argparse
import pylab
import numpy as np
import matplotlib.pyplot as plot
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import MaxNLocator
from pprint import pprint,pformat
from options import *
import graphutil
import datautil
## ==============================================
## LOGGING CONFIGURATION
## ==============================================
LOG = logging.getLogger(__name__)
LOG_handler = logging.StreamHandler()
LOG_formatter = logging.Formatter(
fmt='%(asctime)s [%(funcName)s:%(lineno)03d] %(levelname)-5s: %(message)s',
datefmt='%m-%d-%Y %H:%M:%S'
)
LOG_handler.setFormatter(LOG_formatter)
LOG.addHandler(LOG_handler)
LOG.setLevel(logging.INFO)
## ==============================================
## CONFIGURATION
## ==============================================
def computeEvictionStats(dataFile):
colMap, csvData = datautil.getCSVData(dataFile)
if len(csvData) == 0: return
allTimes = [ ]
allTuples = [ ]
allBlocks = [ ]
allBytes = [ ]
for row in csvData:
allTimes.append(row[colMap["STOP"]] - row[colMap["START"]])
allTuples.append(int(row[colMap["TUPLES_EVICTED"]]))
allBlocks.append(int(row[colMap["BLOCKS_EVICTED"]]))  # was TUPLES_EVICTED, a copy-paste slip; assumes the CSV exposes a BLOCKS_EVICTED column
allBytes.append(int(row[colMap["BYTES_EVICTED"]]))
print dataFile
print " Average Time: %.2f ms" % np.mean(allTimes)
print " Average Tuples: %.2f" % np.mean(allTuples)
print " Average Blocks: %.2f" % np.mean(allBlocks)
print " Average Bytes: %.2f MB" % (np.mean(allBytes)/float(1024*1024))
print
# DEF
## ==============================================
## main
## ==============================================
if __name__ == '__main__':
matches = []
for root, dirnames, filenames in os.walk(OPT_DATA_HSTORE):
for filename in fnmatch.filter(filenames, 'evictions.csv'):
matches.append(os.path.join(root, filename))
map(computeEvictionStats, matches)
## MAIN | gpl-3.0 |
tdhopper/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
irwenqiang/pyensemble | ensemble.py | 3 | 23046 | # Author: David C. Lambert [dcl -at- panix -dot- com]
# Copyright(c) 2013
# License: Simple BSD
"""
The :mod:`ensemble` module implements the ensemble selection
technique of Caruana et al [1][2].
Currently supports f1, auc, rmse, accuracy and mean cross entropy scores
for hillclimbing. Based on numpy, scipy, sklearn and sqlite.
Work in progress.
References
----------
.. [1] Caruana, et al, "Ensemble Selection from Libraries of Rich Models",
Proceedings of the 21st International Conference on Machine Learning
(ICML `04).
.. [2] Caruana, et al, "Getting the Most Out of Ensemble Selection",
Proceedings of the 6th International Conference on Data Mining
(ICDM `06).
"""
import os
import sys
import sqlite3
import numpy as np
from math import sqrt
from cPickle import loads, dumps
from collections import Counter
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.metrics import f1_score, roc_auc_score
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import LabelBinarizer
def _f1(y, y_bin, probs):
"""return f1 score"""
return f1_score(y, np.argmax(probs, axis=1))
def _auc(y, y_bin, probs):
"""return AUC score (for binary problems only)"""
return roc_auc_score(y, probs[:, 1])
def _rmse(y, y_bin, probs):
"""return 1-rmse since we're maximizing the score for hillclimbing"""
return 1.0 - sqrt(mean_squared_error(y_bin, probs))
def _accuracy(y, y_bin, probs):
"""return accuracy score"""
return accuracy_score(y, np.argmax(probs, axis=1))
def _mxentropy(y, y_bin, probs):
"""return negative mean cross entropy since we're maximizing the score
for hillclimbing"""
# clip away from extremes to avoid under/overflows
eps = 1.0e-7
clipped = np.clip(probs, eps, 1.0 - eps)
clipped /= clipped.sum(axis=1)[:, np.newaxis]
return (y_bin * np.log(clipped)).sum() / y.shape[0]
def _bootstraps(n, rs):
"""return bootstrap sample indices for given n"""
bs_inds = rs.randint(n, size=(n))
return bs_inds, np.setdiff1d(range(n), bs_inds)
class EnsembleSelectionClassifier(BaseEstimator, ClassifierMixin):
"""Caruana-style ensemble selection [1][2]
Parameters:
-----------
`db_file` : string
Name of file for sqlite db backing store.
`models` : list or None
List of classifiers following sklearn fit/predict API, if None
fitted models are loaded from the specified database.
`n_best` : int (default: 5)
Number of top models in initial ensemble.
`n_folds` : int (default: 3)
Number of internal cross-validation folds.
`bag_fraction` : float (default: 0.25)
Fraction of (post-pruning) models to randomly select for each bag.
`prune_fraction` : float (default: 0.8)
Fraction of worst models to prune before ensemble selection.
`score_metric` : string (default: 'accuracy')
Score metric to use when hillclimbing. Must be one of
'accuracy', 'xentropy', 'rmse', 'f1'.
`epsilon` : float (default: 0.01)
Minimum score improvement to add model to ensemble. Ignored
if use_epsilon is False.
`max_models` : int (default: 50)
Maximum number of models to include in an ensemble.
`verbose` : boolean (default: False)
Turn on verbose messages.
`use_bootstrap`: boolean (default: False)
If True, use bootstrap sample of entire dataset for fitting, and
oob samples for hillclimbing for each internal CV fold instead
of StratifiedKFolds
`use_epsilon` : boolean (default: False)
If True, candidates models are added to ensembles until the value
of the score_metric fails to improve by the value of the epsilon
parameter. If False, models are added until the number of models
in the candidate ensemble reaches the value of the max_models
parameter.
`random_state` : int, RandomState instance or None (default=None)
Control the pseudo random number generator used to select
candidates for each bag.
References
----------
.. [1] Caruana, et al, "Ensemble Selection from Libraries of Rich Models",
Proceedings of the 21st International Conference on Machine Learning
(ICML `04).
.. [2] Caruana, et al, "Getting the Most Out of Ensemble Selection",
Proceedings of the 6th International Conference on Data Mining
(ICDM `06).
"""
_metrics = {
'f1': _f1,
'auc': _auc,
'rmse': _rmse,
'accuracy': _accuracy,
'xentropy': _mxentropy,
}
def __init__(self, db_file=None,
models=None, n_best=5, n_folds=3,
n_bags=20, bag_fraction=0.25,
prune_fraction=0.8,
score_metric='accuracy',
epsilon=0.01, max_models=50,
use_epsilon=False, use_bootstrap=False,
verbose=False, random_state=None):
self.db_file = db_file
self.models = models
self.n_best = n_best
self.n_bags = n_bags
self.n_folds = n_folds
self.bag_fraction = bag_fraction
self.prune_fraction = prune_fraction
self.score_metric = score_metric
self.epsilon = epsilon
self.max_models = max_models
self.use_epsilon = use_epsilon
self.use_bootstrap = use_bootstrap
self.verbose = verbose
self.random_state = random_state
self._check_params()
self._folds = None
self._n_models = 0
self._n_classes = 0
self._metric = None
self._ensemble = Counter()
self._model_scores = []
self._scored_models = []
self._fitted_models = []
self._init_db(models)
def _check_params(self):
"""Parameter sanity checks"""
if (not self.db_file):
msg = "db_file parameter is required"
raise ValueError(msg)
if (self.epsilon < 0.0):
msg = "epsilon must be >= 0.0"
raise ValueError(msg)
metric_names = self._metrics.keys()
if (self.score_metric not in metric_names):
msg = "score_metric not in %s" % metric_names
raise ValueError(msg)
if (self.n_best < 1):
msg = "n_best must be >= 1"
raise ValueError(msg)
if (self.max_models < self.n_best):
msg = "max_models must be >= n_best"
raise ValueError(msg)
if (not self.use_bootstrap):
if (self.n_folds < 2):
msg = "n_folds must be >= 2 for StratifiedKFolds"
raise ValueError(msg)
else:
if (self.n_folds < 1):
msg = "n_folds must be >= 1 with bootstrap"
raise ValueError(msg)
def _init_db(self, models):
"""Initialize database"""
# db setup script
_createTablesScript = """
create table models (
model_idx integer UNIQUE NOT NULL,
pickled_model blob NOT NULL
);
create table fitted_models (
model_idx integer NOT NULL,
fold_idx integer NOT NULL,
pickled_model blob NOT NULL
);
create table model_scores (
model_idx integer UNIQUE NOT NULL,
score real NOT NULL,
probs blob NOT NULL
);
create table ensemble (
model_idx integer NOT NULL,
weight integer NOT NULL
);
"""
# barf if db file exists and we're making a new model
if (models and os.path.exists(self.db_file)):
raise ValueError("db_file '%s' already exists!" % self.db_file)
db_conn = sqlite3.connect(self.db_file)
with db_conn:
db_conn.execute("pragma journal_mode = off")
if (models):
# build database
with db_conn:
db_conn.executescript(_createTablesScript)
# populate model table
insert_stmt = """insert into models (model_idx, pickled_model)
values (?, ?)"""
with db_conn:
vals = ((i, buffer(dumps(m))) for i, m in enumerate(models))
db_conn.executemany(insert_stmt, vals)
create_stmt = "create index models_index on models (model_idx)"
db_conn.execute(create_stmt)
self._n_models = len(models)
else:
curs = db_conn.cursor()
curs.execute("select count(*) from models")
self._n_models = curs.fetchone()[0]
curs.execute("select model_idx, weight from ensemble")
for k, v in curs.fetchall():
self._ensemble[k] = v
# clumsy hack to get n_classes
curs.execute("select probs from model_scores limit 1")
r = curs.fetchone()
probs = loads(str(r[0]))
self._n_classes = probs.shape[1]
db_conn.close()
def fit(self, X, y):
"""Perform model fitting and ensemble building"""
self.fit_models(X, y)
self.build_ensemble(X, y)
return self
def fit_models(self, X, y):
"""Perform internal cross-validation fit"""
if (self.verbose):
sys.stderr.write('\nfitting models:\n')
if (self.use_bootstrap):
n = X.shape[0]
rs = check_random_state(self.random_state)
self._folds = [_bootstraps(n, rs) for _ in xrange(self.n_folds)]
else:
self._folds = list(StratifiedKFold(y, n_folds=self.n_folds))
select_stmt = "select pickled_model from models where model_idx = ?"
insert_stmt = """insert into fitted_models
(model_idx, fold_idx, pickled_model)
values (?,?,?)"""
db_conn = sqlite3.connect(self.db_file)
curs = db_conn.cursor()
for model_idx in xrange(self._n_models):
curs.execute(select_stmt, [model_idx])
pickled_model = curs.fetchone()[0]
model = loads(str(pickled_model))
model_folds = []
for fold_idx, fold in enumerate(self._folds):
train_inds, _ = fold
model.fit(X[train_inds], y[train_inds])
pickled_model = buffer(dumps(model))
model_folds.append((model_idx, fold_idx, pickled_model))
with db_conn:
db_conn.executemany(insert_stmt, model_folds)
if (self.verbose):
if ((model_idx + 1) % 50 == 0):
sys.stderr.write('%d\n' % (model_idx + 1))
else:
sys.stderr.write('.')
if (self.verbose):
sys.stderr.write('\n')
with db_conn:
stmt = """create index fitted_models_index
on fitted_models (model_idx, fold_idx)"""
db_conn.execute(stmt)
db_conn.close()
def _score_models(self, db_conn, X, y, y_bin):
"""Get cross-validated test scores for each model"""
self._metric = self._metrics[self.score_metric]
if (self.verbose):
sys.stderr.write('\nscoring models:\n')
insert_stmt = """insert into model_scores (model_idx, score, probs)
values (?,?,?)"""
select_stmt = """select pickled_model
from fitted_models
where model_idx = ? and fold_idx = ?"""
# nuke existing scores
with db_conn:
stmt = """drop index if exists model_scores_index;
delete from model_scores;"""
db_conn.executescript(stmt)
curs = db_conn.cursor()
# build probs array using the test sets for each internal CV fold
for model_idx in xrange(self._n_models):
probs = np.zeros((len(X), self._n_classes))
for fold_idx, fold in enumerate(self._folds):
_, test_inds = fold
curs.execute(select_stmt, [model_idx, fold_idx])
res = curs.fetchone()
model = loads(str(res[0]))
probs[test_inds] = model.predict_proba(X[test_inds])
score = self._metric(y, y_bin, probs)
# save score and probs array
with db_conn:
vals = (model_idx, score, buffer(dumps(probs)))
db_conn.execute(insert_stmt, vals)
if (self.verbose):
if ((model_idx + 1) % 50 == 0):
sys.stderr.write('%d\n' % (model_idx + 1))
else:
sys.stderr.write('.')
if (self.verbose):
sys.stderr.write('\n')
with db_conn:
stmt = """create index model_scores_index
on model_scores (model_idx)"""
db_conn.execute(stmt)
def _get_ensemble_score(self, db_conn, ensemble, y, y_bin):
"""Get score for model ensemble"""
n_models = float(sum(ensemble.values()))
ensemble_probs = np.zeros((len(y), self._n_classes))
curs = db_conn.cursor()
select_stmt = """select model_idx, probs
from model_scores
where model_idx in %s"""
in_str = str(tuple(ensemble)).replace(',)', ')')
curs.execute(select_stmt % in_str)
for row in curs.fetchall():
model_idx, probs = row
probs = loads(str(probs))
weight = ensemble[model_idx]
ensemble_probs += probs * weight
ensemble_probs /= n_models
score = self._metric(y, y_bin, ensemble_probs)
return score, ensemble_probs
def _score_with_model(self, db_conn, y, y_bin, probs, n_models, model_idx):
"""compute ensemble score with specified model added"""
curs = db_conn.cursor()
select_stmt = """select probs
from model_scores
where model_idx = %d"""
curs.execute(select_stmt % model_idx)
row = curs.fetchone()
n_models = float(n_models)
new_probs = loads(str(row[0]))
new_probs = (probs*n_models + new_probs)/(n_models + 1.0)
score = self._metric(y, y_bin, new_probs)
return score, new_probs
def _ensemble_from_candidates(self, db_conn, candidates, y, y_bin):
"""Build an ensemble from a list of candidate models"""
ensemble = Counter(candidates[:self.n_best])
ens_score, ens_probs = self._get_ensemble_score(db_conn,
ensemble,
y, y_bin)
ens_count = sum(ensemble.values())
if (self.verbose):
sys.stderr.write('%02d/%.3f ' % (ens_count, ens_score))
cand_ensembles = []
while(ens_count < self.max_models):
# compute and collect scores after adding each candidate
new_scores = []
for new_model_idx in candidates:
score, _ = self._score_with_model(db_conn, y, y_bin,
ens_probs, ens_count,
new_model_idx)
new_scores.append({'score': score,
'new_model_idx': new_model_idx})
new_scores.sort(key=lambda x: x['score'], reverse=True)
last_ens_score = ens_score
ens_score = new_scores[0]['score']
if (self.use_epsilon):
# if score improvement is less than epsilon,
# don't add the new model and stop
score_diff = ens_score - last_ens_score
if (score_diff < self.epsilon):
break
new_model_idx = new_scores[0]['new_model_idx']
ensemble.update({new_model_idx: 1})
_, ens_probs = self._score_with_model(db_conn, y, y_bin,
ens_probs, ens_count,
new_model_idx)
if (not self.use_epsilon):
# store current ensemble to select best later
ens_copy = Counter(ensemble)
cand = {'ens': ens_copy, 'score': ens_score}
cand_ensembles.append(cand)
ens_count = sum(ensemble.values())
if (self.verbose):
if ((ens_count - self.n_best) % 8 == 0):
sys.stderr.write("\n ")
msg = '%02d/%.3f ' % (ens_count, ens_score)
sys.stderr.write(msg)
if (self.verbose):
sys.stderr.write('\n')
if (not self.use_epsilon and ens_count == self.max_models):
cand_ensembles.sort(key=lambda x: x['score'], reverse=True)
ensemble = cand_ensembles[0]['ens']
return ensemble
def _get_best_model(self, curs):
"""perform query for best scoring model"""
select_stmt = """select model_idx, pickled_model
from models
where model_idx =
(select model_idx
from model_scores
order by score desc
limit 1)"""
curs.execute(select_stmt)
row = curs.fetchone()
return row[0], loads(str(row[1]))
def best_model(self):
"""Returns best model found after CV scoring"""
db_conn = sqlite3.connect(self.db_file)
_, model = self._get_best_model(db_conn.cursor())
db_conn.close()
return model
def _print_best_results(self, curs, best_model_score):
"""Show best model and score"""
sys.stderr.write('Best model CV score: %.5f\n' % best_model_score)
_, best_model = self._get_best_model(curs)
sys.stderr.write('Best model: %s\n\n' % repr(best_model))
def build_ensemble(self, X, y, rescore=True):
"""Generate bagged ensemble"""
self._n_classes = len(np.unique(y))
db_conn = sqlite3.connect(self.db_file)
curs = db_conn.cursor()
# binarize
if (self._n_classes > 2):
y_bin = LabelBinarizer().fit_transform(y)
else:
y_bin = np.column_stack((1-y, y))
# get CV scores for fitted models
if (rescore):
self._score_models(db_conn, X, y, y_bin)
# get number of best models to take
n_models = int(self._n_models * (1.0 - self.prune_fraction))
bag_size = int(self.bag_fraction * n_models)
if (self.verbose):
sys.stderr.write('%d models left after pruning\n' % n_models)
sys.stderr.write('leaving %d candidates per bag\n\n' % bag_size)
# get indices and scores from DB
select_stmt = """select model_idx, score
from model_scores
order by score desc
limit %d"""
curs.execute(select_stmt % n_models)
ranked_model_scores = [(r[0], r[1]) for r in curs.fetchall()]
# print best performing model results
best_model_score = ranked_model_scores[0][1]
if (self.verbose):
self._print_best_results(curs, best_model_score)
sys.stderr.write("Ensemble scores for each bag (size/score):\n")
ensembles = []
# make bags and ensembles
rs = check_random_state(self.random_state)
for i in xrange(self.n_bags):
# get bag_size elements at random
cand_indices = rs.permutation(n_models)[:bag_size]
# sort by rank
candidates = [ranked_model_scores[ci][0] for ci in cand_indices]
if (self.verbose):
sys.stderr.write('Bag %02d): ' % (i+1))
# build an ensemble with current candidates
ensemble = self._ensemble_from_candidates(db_conn,
candidates,
y, y_bin)
ensembles.append(ensemble)
# combine ensembles from each bag
for e in ensembles:
self._ensemble += e
# push to DB
insert_stmt = "insert into ensemble(model_idx, weight) values (?, ?)"
with db_conn:
val_gen = ((mi, w) for mi, w in self._ensemble.most_common())
db_conn.executemany(insert_stmt, val_gen)
if (self.verbose):
score, _ = self._get_ensemble_score(db_conn,
self._ensemble,
y, y_bin)
fmt = "\nFinal ensemble (%d components) CV score: %.5f\n\n"
sys.stderr.write(fmt % (sum(self._ensemble.values()), score))
db_conn.close()
def _model_predict_proba(self, X, model_idx=0):
"""Get probability predictions for a model given its index"""
db_conn = sqlite3.connect(self.db_file)
curs = db_conn.cursor()
select_stmt = """select pickled_model
from fitted_models
where model_idx = ? and fold_idx = ?"""
# average probs over each n_folds models
probs = np.zeros((len(X), self._n_classes))
for fold_idx in xrange(self.n_folds):
curs.execute(select_stmt, [model_idx, fold_idx])
res = curs.fetchone()
model = loads(str(res[0]))
probs += model.predict_proba(X)/float(self.n_folds)
db_conn.close()
return probs
def best_model_predict_proba(self, X):
"""Probability estimates for all classes (ordered by class label)
using best model"""
db_conn = sqlite3.connect(self.db_file)
best_model_idx, _ = self._get_best_model(db_conn.cursor())
db_conn.close()
return self._model_predict_proba(X, best_model_idx)
def best_model_predict(self, X):
"""Predict class labels for samples in X using best model"""
return np.argmax(self.best_model_predict_proba(X), axis=1)
def predict_proba(self, X):
"""Probability estimates for all classes (ordered by class label)"""
n_models = float(sum(self._ensemble.values()))
probs = np.zeros((len(X), self._n_classes))
for model_idx, weight in self._ensemble.items():
probs += self._model_predict_proba(X, model_idx) * weight/n_models
return probs
def predict(self, X):
"""Predict class labels for samples in X."""
return np.argmax(self.predict_proba(X), axis=1)
| bsd-3-clause |
nmayorov/scikit-learn | sklearn/gaussian_process/kernels.py | 18 | 66251 | """Kernels for Gaussian process regression and classification.
The kernels in this module allow kernel-engineering, i.e., they can be
combined via the "+" and "*" operators or be exponentiated with a scalar
via "**". These sum and product expressions can also contain scalar values,
which are automatically converted to a constant kernel.
All kernels allow (analytic) gradient-based hyperparameter optimization.
The space of hyperparameters can be specified by giving lower and upper
boundaries for the value of each hyperparameter (the search space is thus
rectangular). Instead of specifying bounds, hyperparameters can also be
declared to be "fixed", which causes these hyperparameters to be excluded from
optimization.
"""
# Author: Jan Hendrik Metzen <[email protected]>
# Licence: BSD 3 clause
# Note: this module is strongly inspired by the kernel module of the george
# package.
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import math
import numpy as np
from scipy.special import kv, gamma
from scipy.spatial.distance import pdist, cdist, squareform
from ..metrics.pairwise import pairwise_kernels
from ..externals import six
from ..base import clone
from sklearn.externals.funcsigs import signature
class Hyperparameter(namedtuple('Hyperparameter',
('name', 'value_type', 'bounds',
'n_elements', 'fixed'))):
"""A kernel hyperparameter's specification in form of a namedtuple.
Attributes
----------
name : string
The name of the hyperparameter. Note that a kernel using a
hyperparameter with name "x" must have the attributes self.x and
self.x_bounds
value_type : string
The type of the hyperparameter. Currently, only "numeric"
hyperparameters are supported.
bounds : pair of floats >= 0 or "fixed"
The lower and upper bound on the parameter. If n_elements>1, a pair
of 1d array with n_elements each may be given alternatively. If
the string "fixed" is passed as bounds, the hyperparameter's value
cannot be changed.
n_elements : int, default=1
The number of elements of the hyperparameter value. Defaults to 1,
which corresponds to a scalar hyperparameter. n_elements > 1
corresponds to a hyperparameter which is vector-valued,
such as, e.g., anisotropic length-scales.
fixed : bool, default: None
Whether the value of this hyperparameter is fixed, i.e., cannot be
changed during hyperparameter tuning. If None is passed, "fixed" is
derived from the given bounds.
"""
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __init__ method we
# would also reintroduce the __dict__ on the instance, so instead we tell
# the Python interpreter that this subclass uses static __slots__ rather
# than dynamic attributes. Furthermore we don't need any additional slot
# in the subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None):
if bounds != "fixed":
bounds = np.atleast_2d(bounds)
if n_elements > 1: # vector-valued parameter
if bounds.shape[0] == 1:
bounds = np.repeat(bounds, n_elements, 0)
elif bounds.shape[0] != n_elements:
raise ValueError("Bounds on %s should have either 1 or "
"%d dimensions. Given are %d"
% (name, n_elements, bounds.shape[0]))
if fixed is None:
fixed = (bounds == "fixed")
return super(Hyperparameter, cls).__new__(
cls, name, value_type, bounds, n_elements, fixed)
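# Illustrative construction (the name and bounds are made up; only the
# __new__ signature above is assumed):
#
#     h = Hyperparameter("length_scale", "numeric", (1e-5, 1e5))
#     h.name, h.n_elements   # -> ('length_scale', 1)
#     h.bounds               # -> array([[1.e-05, 1.e+05]]), via np.atleast_2d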
class Kernel(six.with_metaclass(ABCMeta)):
"""Base class for all kernels."""
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict()
# introspect the constructor arguments to find the model parameters
# to represent
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
init_sign = signature(init)
args, varargs = [], []
for parameter in init_sign.parameters.values():
if (parameter.kind != parameter.VAR_KEYWORD and
parameter.name != 'self'):
args.append(parameter.name)
if parameter.kind == parameter.VAR_POSITIONAL:
varargs.append(parameter.name)
if len(varargs) != 0:
raise RuntimeError("scikit-learn kernels should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
for arg in args:
params[arg] = getattr(self, arg, None)
return params
def set_params(self, **params):
"""Set the parameters of this kernel.
The method works on simple kernels as well as on nested kernels.
The latter have parameters of the form ``<component>__<parameter>``
so that it's possible to update each component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for kernel %s. '
'Check the list of available parameters '
'with `kernel.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
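# For example (a sketch; the `k1__...` key is a nested key of the form
# produced by KernelOperator.get_params below, and constant_value is assumed
# to be the constructor parameter of ConstantKernel defined later in this
# module):
#
#     k = ConstantKernel(1.0) + ConstantKernel(2.0)
#     k.set_params(k1__constant_value=3.0)   # updates only the first summand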
def clone_with_theta(self, theta):
"""Returns a clone of self with given hyperparameters theta. """
cloned = clone(self)
cloned.theta = theta
return cloned
@property
def n_dims(self):
"""Returns the number of non-fixed hyperparameters of the kernel."""
return self.theta.shape[0]
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter specifications."""
r = []
for attr, value in sorted(self.__dict__.items()):
if attr.startswith("hyperparameter_"):
r.append(value)
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
theta = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
theta.append(getattr(self, hyperparameter.name))
if len(theta) > 0:
return np.log(np.hstack(theta))
else:
return np.array([])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
i = 0
for hyperparameter in self.hyperparameters:
if hyperparameter.fixed:
continue
if hyperparameter.n_elements > 1:
# vector-valued parameter
setattr(self, hyperparameter.name,
np.exp(theta[i:i + hyperparameter.n_elements]))
i += hyperparameter.n_elements
else:
setattr(self, hyperparameter.name, np.exp(theta[i]))
i += 1
if i != len(theta):
raise ValueError("theta has not the correct number of entries."
" Should be %d; given are %d"
% (i, len(theta)))
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
bounds = []
for hyperparameter in self.hyperparameters:
if not hyperparameter.fixed:
bounds.append(hyperparameter.bounds)
if len(bounds) > 0:
return np.log(np.vstack(bounds))
else:
return np.array([])
def __add__(self, b):
if not isinstance(b, Kernel):
return Sum(self, ConstantKernel(b))
return Sum(self, b)
def __radd__(self, b):
if not isinstance(b, Kernel):
return Sum(ConstantKernel(b), self)
return Sum(b, self)
def __mul__(self, b):
if not isinstance(b, Kernel):
return Product(self, ConstantKernel(b))
return Product(self, b)
def __rmul__(self, b):
if not isinstance(b, Kernel):
return Product(ConstantKernel(b), self)
return Product(b, self)
def __pow__(self, b):
return Exponentiation(self, b)
def __eq__(self, b):
if type(self) != type(b):
return False
params_a = self.get_params()
params_b = b.get_params()
for key in set(list(params_a.keys()) + list(params_b.keys())):
if np.any(params_a.get(key, None) != params_b.get(key, None)):
return False
return True
def __repr__(self):
return "{0}({1})".format(self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.theta)))
@abstractmethod
def __call__(self, X, Y=None, eval_gradient=False):
"""Evaluate the kernel."""
@abstractmethod
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
@abstractmethod
def is_stationary(self):
"""Returns whether the kernel is stationary. """
class NormalizedKernelMixin(object):
"""Mixin for kernels which are normalized: k(X, X)=1."""
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.ones(X.shape[0])
class StationaryKernelMixin(object):
"""Mixin for kernels which are stationary: k(X, Y)= f(X-Y)."""
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return True
class CompoundKernel(Kernel):
"""Kernel which is composed of a set of other kernels."""
def __init__(self, kernels):
self.kernels = kernels
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
return dict(kernels=self.kernels)
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.hstack([kernel.theta for kernel in self.kernels])
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k_dims = self.kernels[0].n_dims  # CompoundKernel has no k1/k2; use the first sub-kernel
for i, kernel in enumerate(self.kernels):
kernel.theta = theta[i * k_dims:(i + 1) * k_dims]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return np.vstack([kernel.bounds for kernel in self.kernels])
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Note that this compound kernel returns the results of all simple kernels
stacked along an additional axis.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y, n_kernels)
Kernel k(X, Y)
K_gradient : array, shape (n_samples_X, n_samples_X, n_dims, n_kernels)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K = []
K_grad = []
for kernel in self.kernels:
K_single, K_grad_single = kernel(X, Y, eval_gradient)
K.append(K_single)
K_grad.append(K_grad_single[..., np.newaxis])
return np.dstack(K), np.concatenate(K_grad, 3)
else:
return np.dstack([kernel(X, Y, eval_gradient)
for kernel in self.kernels])
def __eq__(self, b):
if type(self) != type(b) or len(self.kernels) != len(b.kernels):
return False
return np.all([self.kernels[i] == b.kernels[i]
for i in range(len(self.kernels))])
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return np.all([kernel.is_stationary() for kernel in self.kernels])
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X, n_kernels)
Diagonal of kernel k(X, X)
"""
return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class KernelOperator(Kernel):
"""Base class for all kernel operators. """
def __init__(self, k1, k2):
self.k1 = k1
self.k2 = k2
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(k1=self.k1, k2=self.k2)
if deep:
deep_items = self.k1.get_params().items()
params.update(('k1__' + k, val) for k, val in deep_items)
deep_items = self.k2.get_params().items()
params.update(('k2__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.k1.hyperparameters:
r.append(Hyperparameter("k1__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
for hyperparameter in self.k2.hyperparameters:
r.append(Hyperparameter("k2__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return np.append(self.k1.theta, self.k2.theta)
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
k1_dims = self.k1.n_dims
self.k1.theta = theta[:k1_dims]
self.k2.theta = theta[k1_dims:]
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
if self.k1.bounds.size == 0:
return self.k2.bounds
if self.k2.bounds.size == 0:
return self.k1.bounds
return np.vstack((self.k1.bounds, self.k2.bounds))
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.k1 == b.k1 and self.k2 == b.k2) \
or (self.k1 == b.k2 and self.k2 == b.k1)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.k1.is_stationary() and self.k2.is_stationary()
class Sum(KernelOperator):
"""Sum-kernel k1 + k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_sum(X, Y) = k1(X, Y) + k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the sum-kernel
k2 : Kernel object
The second base-kernel of the sum-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 + K2, np.dstack((K1_gradient, K2_gradient))
else:
return self.k1(X, Y) + self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) + self.k2.diag(X)
def __repr__(self):
return "{0} + {1}".format(self.k1, self.k2)
class Product(KernelOperator):
"""Product-kernel k1 * k2 of two kernels k1 and k2.
The resulting kernel is defined as
k_prod(X, Y) = k1(X, Y) * k2(X, Y)
Parameters
----------
k1 : Kernel object
The first base-kernel of the product-kernel
k2 : Kernel object
The second base-kernel of the product-kernel
"""
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
return K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis],
K2_gradient * K1[:, :, np.newaxis]))
else:
return self.k1(X, Y) * self.k2(X, Y)
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.k1.diag(X) * self.k2.diag(X)
def __repr__(self):
return "{0} * {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
"""Exponentiate kernel by given exponent.
The resulting kernel is defined as
k_exp(X, Y) = k(X, Y) ** exponent
Parameters
----------
kernel : Kernel object
The base kernel
exponent : float
The exponent for the base kernel
"""
def __init__(self, kernel, exponent):
self.kernel = kernel
self.exponent = exponent
def get_params(self, deep=True):
"""Get parameters of this kernel.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
params = dict(kernel=self.kernel, exponent=self.exponent)
if deep:
deep_items = self.kernel.get_params().items()
params.update(('kernel__' + k, val) for k, val in deep_items)
return params
@property
def hyperparameters(self):
"""Returns a list of all hyperparameter."""
r = []
for hyperparameter in self.kernel.hyperparameters:
r.append(Hyperparameter("kernel__" + hyperparameter.name,
hyperparameter.value_type,
hyperparameter.bounds,
hyperparameter.n_elements))
return r
@property
def theta(self):
"""Returns the (flattened, log-transformed) non-fixed hyperparameters.
Note that theta are typically the log-transformed values of the
kernel's hyperparameters as this representation of the search space
is more amenable for hyperparameter search, as hyperparameters like
length-scales naturally live on a log-scale.
Returns
-------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
return self.kernel.theta
@theta.setter
def theta(self, theta):
"""Sets the (flattened, log-transformed) non-fixed hyperparameters.
Parameters
----------
theta : array, shape (n_dims,)
The non-fixed, log-transformed hyperparameters of the kernel
"""
self.kernel.theta = theta
@property
def bounds(self):
"""Returns the log-transformed bounds on the theta.
Returns
-------
bounds : array, shape (n_dims, 2)
The log-transformed bounds on the kernel's hyperparameters theta
"""
return self.kernel.bounds
def __eq__(self, b):
if type(self) != type(b):
return False
return (self.kernel == b.kernel and self.exponent == b.exponent)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
if eval_gradient:
K, K_gradient = self.kernel(X, Y, eval_gradient=True)
K_gradient *= \
self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
return K ** self.exponent, K_gradient
else:
K = self.kernel(X, Y, eval_gradient=False)
return K ** self.exponent
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.kernel.diag(X) ** self.exponent
def __repr__(self):
return "{0} ** {1}".format(self.kernel, self.exponent)
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.kernel.is_stationary()
class ConstantKernel(StationaryKernelMixin, Kernel):
"""Constant kernel.
Can be used as part of a product-kernel where it scales the magnitude of
the other factor (kernel) or as part of a sum-kernel, where it modifies
the mean of the Gaussian process.
k(x_1, x_2) = constant_value for all x_1, x_2
Parameters
----------
constant_value : float, default: 1.0
The constant value which defines the covariance:
k(x_1, x_2) = constant_value
constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on constant_value
"""
def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)):
self.constant_value = constant_value
self.constant_value_bounds = constant_value_bounds
self.hyperparameter_constant_value = \
Hyperparameter("constant_value", "numeric", constant_value_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
Y = X
elif eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
K = self.constant_value * np.ones((X.shape[0], Y.shape[0]))
if eval_gradient:
if not self.hyperparameter_constant_value.fixed:
return (K, self.constant_value
* np.ones((X.shape[0], X.shape[0], 1)))
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.constant_value * np.ones(X.shape[0])
def __repr__(self):
return "{0:.3g}**2".format(np.sqrt(self.constant_value))
class WhiteKernel(StationaryKernelMixin, Kernel):
"""White kernel.
The main use-case of this kernel is as part of a sum-kernel where it
explains the noise-component of the signal. Tuning its parameter
corresponds to estimating the noise-level.
k(x_1, x_2) = noise_level if x_1 == x_2 else 0
Parameters
----------
noise_level : float, default: 1.0
Parameter controlling the noise level
noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on noise_level
"""
def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
self.noise_level = noise_level
self.noise_level_bounds = noise_level_bounds
self.hyperparameter_noise_level = \
Hyperparameter("noise_level", "numeric", noise_level_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is not None and eval_gradient:
raise ValueError("Gradient can only be evaluated when Y is None.")
if Y is None:
K = self.noise_level * np.eye(X.shape[0])
if eval_gradient:
if not self.hyperparameter_noise_level.fixed:
return (K, self.noise_level
* np.eye(X.shape[0])[:, :, np.newaxis])
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
else:
return np.zeros((X.shape[0], Y.shape[0]))
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return self.noise_level * np.ones(X.shape[0])
def __repr__(self):
return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
self.noise_level)
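# --- Illustrative usage sketch (not part of the original module): evaluating the
# two simple kernels defined above. Outside this file the same classes are
# importable from sklearn.gaussian_process.kernels; all values are placeholders.
import numpy as np  # re-imported so the snippet can be copied out standalone

_X_demo = np.array([[0.0], [0.5], [1.0]])
_const = ConstantKernel(constant_value=2.0)
_white = WhiteKernel(noise_level=0.1)

# ConstantKernel returns the same value everywhere; WhiteKernel only contributes
# on the diagonal (i.e. for identical samples).
print(_const(_X_demo))                       # 2.0 in every entry
print(_white(_X_demo))                       # 0.1 on the diagonal, 0 elsewhere
_K_demo, _K_demo_grad = _white(_X_demo, eval_gradient=True)
print(_K_demo_grad.shape)                    # (3, 3, 1): one free hyperparameter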
class RBF(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Radial-basis function kernel (aka squared-exponential kernel).
The RBF kernel is a stationary kernel. It is also known as the
"squared exponential" kernel. It is parameterized by a length-scale
parameter length_scale>0, which can either be a scalar (isotropic variant
of the kernel) or a vector with the same number of dimensions as the inputs
X (anisotropic variant of the kernel). The kernel is given by:
    k(x_i, x_j) = exp(-1/2 * d(x_i / length_scale, x_j / length_scale)^2)
This kernel is infinitely differentiable, which implies that GPs with this
kernel as covariance function have mean square derivatives of all orders,
and are thus very smooth.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)):
if np.iterable(length_scale):
if len(length_scale) > 1:
self.anisotropic = True
                self.length_scale = np.asarray(length_scale, dtype=float)
else:
self.anisotropic = False
self.length_scale = float(length_scale[0])
else:
self.anisotropic = False
self.length_scale = float(length_scale)
self.length_scale_bounds = length_scale_bounds
if self.anisotropic: # anisotropic length_scale
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds,
len(length_scale))
else:
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='sqeuclidean')
K = np.exp(-.5 * dists)
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='sqeuclidean')
K = np.exp(-.5 * dists)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
return K, np.empty((X.shape[0], X.shape[0], 0))
elif not self.anisotropic or self.length_scale.shape[0] == 1:
K_gradient = \
(K * squareform(dists))[:, :, np.newaxis]
return K, K_gradient
elif self.anisotropic:
# We need to recompute the pairwise dimension-wise distances
K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \
/ (self.length_scale ** 2)
K_gradient *= K[..., np.newaxis]
return K, K_gradient
else:
raise Exception("Anisotropic kernels require that the number "
"of length scales and features match.")
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}])".format(
self.__class__.__name__, ", ".join(map("{0:.3g}".format,
self.length_scale)))
else: # isotropic
return "{0}(length_scale={1:.3g})".format(
self.__class__.__name__, self.length_scale)
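# --- Illustrative usage sketch (not part of the original module): evaluating the
# RBF kernel, its gradient w.r.t. the log-transformed length scale(s), and a
# composite kernel built with the operators defined above. Outside this file the
# classes are importable from sklearn.gaussian_process.kernels; data and
# hyperparameter values are arbitrary placeholders.
import numpy as np  # re-imported so the snippet can be copied out standalone

_rng = np.random.RandomState(0)
_X_demo = _rng.rand(5, 2)

_iso = RBF(length_scale=1.0)              # one length scale shared by features
_aniso = RBF(length_scale=[1.0, 0.5])     # one length scale per feature

_K_iso, _K_iso_grad = _iso(_X_demo, eval_gradient=True)
print(_K_iso.shape, _K_iso_grad.shape)    # (5, 5) (5, 5, 1)
print(np.allclose(np.diag(_K_iso), 1.0))  # True: k(x, x) = 1

_K_a, _K_a_grad = _aniso(_X_demo, eval_gradient=True)
print(_K_a_grad.shape)                    # (5, 5, 2): one slice per length scale

# A common composite: amplitude * RBF + white noise (Sum/Product under the hood).
_composite = ConstantKernel(1.0) * RBF(length_scale=1.0) + WhiteKernel(0.1)
print(np.allclose(np.diag(_composite(_X_demo)), _composite.diag(_X_demo)))  # True
print(_composite.theta.shape)             # (3,): three log-hyperparameters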
class Matern(RBF):
""" Matern kernel.
The class of Matern kernels is a generalization of the RBF and the
absolute exponential kernel parameterized by an additional parameter
nu. The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5
to the absolute exponential kernel. Important intermediate values are
nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable
functions).
    See Rasmussen and Williams (2006), p. 84 for details regarding the
different variants of the Matern kernel.
Parameters
-----------
length_scale : float or array with shape (n_features,), default: 1.0
The length scale of the kernel. If a float, an isotropic kernel is
used. If an array, an anisotropic kernel is used where each dimension
of l defines the length-scale of the respective feature dimension.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
nu: float, default: 1.5
The parameter nu controlling the smoothness of the learned function.
The smaller nu, the less smooth the approximated function is.
For nu=inf, the kernel becomes equivalent to the RBF kernel and for
nu=0.5 to the absolute exponential kernel. Important intermediate
values are nu=1.5 (once differentiable functions) and nu=2.5
(twice differentiable functions). Note that values of nu not in
[0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
(appr. 10 times higher) since they require to evaluate the modified
Bessel function. Furthermore, in contrast to l, nu is kept fixed to
its initial value and not optimized.
"""
def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
nu=1.5):
super(Matern, self).__init__(length_scale, length_scale_bounds)
self.nu = nu
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if self.anisotropic and X.shape[1] != self.length_scale.shape[0]:
raise Exception("Anisotropic kernel must have the same number of "
"dimensions as data (%d!=%d)"
% (self.length_scale.shape[0], X.shape[1]))
if Y is None:
dists = pdist(X / self.length_scale, metric='euclidean')
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X / self.length_scale, Y / self.length_scale,
metric='euclidean')
if self.nu == 0.5:
K = np.exp(-dists)
elif self.nu == 1.5:
K = dists * math.sqrt(3)
K = (1. + K) * np.exp(-K)
elif self.nu == 2.5:
K = dists * math.sqrt(5)
K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
else: # general case; expensive to evaluate
K = dists
K[K == 0.0] += np.finfo(float).eps # strict zeros result in nan
tmp = (math.sqrt(2 * self.nu) * K)
K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
K *= tmp ** self.nu
K *= kv(self.nu, tmp)
if Y is None:
# convert from upper-triangular matrix to square matrix
K = squareform(K)
np.fill_diagonal(K, 1)
if eval_gradient:
if self.hyperparameter_length_scale.fixed:
# Hyperparameter l kept fixed
K_gradient = np.empty((X.shape[0], X.shape[0], 0))
return K, K_gradient
# We need to recompute the pairwise dimension-wise distances
if self.anisotropic:
D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
/ (self.length_scale ** 2)
else:
D = squareform(dists**2)[:, :, np.newaxis]
if self.nu == 0.5:
K_gradient = K[..., np.newaxis] * D \
/ np.sqrt(D.sum(2))[:, :, np.newaxis]
K_gradient[~np.isfinite(K_gradient)] = 0
elif self.nu == 1.5:
K_gradient = \
3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
elif self.nu == 2.5:
tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
else:
# approximate gradient numerically
def f(theta): # helper function
return self.clone_with_theta(theta)(X, Y)
return K, _approx_fprime(self.theta, f, 1e-10)
if not self.anisotropic:
return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
else:
return K, K_gradient
else:
return K
def __repr__(self):
if self.anisotropic:
return "{0}(length_scale=[{1}], nu={2:.3g})".format(
self.__class__.__name__,
", ".join(map("{0:.3g}".format, self.length_scale)),
self.nu)
else: # isotropic
return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.nu)
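# --- Illustrative sanity check (not part of the original module): for nu=0.5 the
# Matern kernel reduces to the absolute-exponential kernel exp(-d / length_scale).
# cdist is already imported at module level; it is re-imported here only so the
# snippet can be copied out standalone. Values are arbitrary placeholders.
import numpy as np
from scipy.spatial.distance import cdist

_rng = np.random.RandomState(0)
_X_demo = _rng.rand(6, 3)
_ls_demo = 0.7

_K_matern = Matern(length_scale=_ls_demo, nu=0.5)(_X_demo)
_K_expected = np.exp(-cdist(_X_demo, _X_demo) / _ls_demo)
print(np.allclose(_K_matern, _K_expected))   # True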
class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Rational Quadratic kernel.
The RationalQuadratic kernel can be seen as a scale mixture (an infinite
sum) of RBF kernels with different characteristic length-scales. It is
parameterized by a length-scale parameter length_scale>0 and a scale
mixture parameter alpha>0. Only the isotropic variant where length_scale is
    a scalar is supported at the moment. The kernel is given by:
k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
alpha : float > 0, default: 1.0
Scale mixture parameter
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on alpha
"""
def __init__(self, length_scale=1.0, alpha=1.0,
length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.alpha = alpha
self.length_scale_bounds = length_scale_bounds
self.alpha_bounds = alpha_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_alpha = \
Hyperparameter("alpha", "numeric", alpha_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='sqeuclidean'))
tmp = dists / (2 * self.alpha * self.length_scale ** 2)
base = (1 + tmp)
K = base ** -self.alpha
np.fill_diagonal(K, 1)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='sqeuclidean')
K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \
** -self.alpha
if eval_gradient:
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
dists * K / (self.length_scale ** 2 * base)
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # l is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to alpha
if not self.hyperparameter_alpha.fixed:
alpha_gradient = \
K * (-self.alpha * np.log(base)
+ dists / (2 * self.length_scale ** 2 * base))
alpha_gradient = alpha_gradient[:, :, np.newaxis]
else: # alpha is kept fixed
alpha_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((alpha_gradient, length_scale_gradient))
else:
return K
def __repr__(self):
return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format(
self.__class__.__name__, self.alpha, self.length_scale)
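# --- Illustrative sanity check (not part of the original module): for large alpha
# the RationalQuadratic kernel approaches an RBF kernel with the same length
# scale. Values are arbitrary placeholders.
import numpy as np

_rng = np.random.RandomState(0)
_X_demo = _rng.rand(5, 2)

_K_rq = RationalQuadratic(length_scale=1.0, alpha=1e6)(_X_demo)
_K_rbf = RBF(length_scale=1.0)(_X_demo)
print(np.max(np.abs(_K_rq - _K_rbf)))   # on the order of 1e-6 or smaller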
class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel):
"""Exp-Sine-Squared kernel.
The ExpSineSquared kernel allows modeling periodic functions. It is
parameterized by a length-scale parameter length_scale>0 and a periodicity
    parameter periodicity>0. Only the isotropic variant where length_scale
    is a scalar is supported at the moment. The kernel is given by:
    k(x_i, x_j) = exp(-2 * (sin(\pi / periodicity * d(x_i, x_j)) / length_scale)^2)
Parameters
----------
length_scale : float > 0, default: 1.0
The length scale of the kernel.
periodicity : float > 0, default: 1.0
The periodicity of the kernel.
length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on length_scale
periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on periodicity
"""
def __init__(self, length_scale=1.0, periodicity=1.0,
length_scale_bounds=(1e-5, 1e5),
periodicity_bounds=(1e-5, 1e5)):
self.length_scale = length_scale
self.periodicity = periodicity
self.length_scale_bounds = length_scale_bounds
self.periodicity_bounds = periodicity_bounds
self.hyperparameter_length_scale = \
Hyperparameter("length_scale", "numeric", length_scale_bounds)
self.hyperparameter_periodicity = \
Hyperparameter("periodicity", "numeric", periodicity_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
dists = squareform(pdist(X, metric='euclidean'))
arg = np.pi * dists / self.periodicity
sin_of_arg = np.sin(arg)
K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2)
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
dists = cdist(X, Y, metric='euclidean')
K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists)
/ self.length_scale) ** 2)
if eval_gradient:
cos_of_arg = np.cos(arg)
# gradient with respect to length_scale
if not self.hyperparameter_length_scale.fixed:
length_scale_gradient = \
4 / self.length_scale**2 * sin_of_arg**2 * K
length_scale_gradient = length_scale_gradient[:, :, np.newaxis]
else: # length_scale is kept fixed
length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0))
# gradient with respect to p
if not self.hyperparameter_periodicity.fixed:
periodicity_gradient = \
4 * arg / self.length_scale**2 * cos_of_arg \
* sin_of_arg * K
periodicity_gradient = periodicity_gradient[:, :, np.newaxis]
else: # p is kept fixed
periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0))
return K, np.dstack((length_scale_gradient, periodicity_gradient))
else:
return K
def __repr__(self):
return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format(
self.__class__.__name__, self.length_scale, self.periodicity)
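# --- Illustrative sanity check (not part of the original module): inputs that are
# exactly one period apart are perfectly correlated under ExpSineSquared.
# Values are arbitrary placeholders.
import numpy as np

_period_demo = 3.0
_k_per = ExpSineSquared(length_scale=1.0, periodicity=_period_demo)
_x_demo = np.array([[0.0], [1.0], [2.0]])
_K_cross = _k_per(_x_demo, _x_demo + _period_demo)  # cross-kernel k(x, x + periodicity)
print(np.allclose(np.diag(_K_cross), 1.0))          # True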
class DotProduct(Kernel):
"""Dot-Product kernel.
The DotProduct kernel is non-stationary and can be obtained from linear
regression by putting N(0, 1) priors on the coefficients of x_d (d = 1, . .
. , D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel
is invariant to a rotation of the coordinates about the origin, but not
translations. It is parameterized by a parameter sigma_0^2. For
sigma_0^2 =0, the kernel is called the homogeneous linear kernel, otherwise
it is inhomogeneous. The kernel is given by
k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j
The DotProduct kernel is commonly combined with exponentiation.
Parameters
----------
sigma_0 : float >= 0, default: 1.0
        Parameter controlling the inhomogeneity of the kernel. If sigma_0=0,
        the kernel is homogeneous.
    sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5)
        The lower and upper bound on sigma_0
"""
def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)):
self.sigma_0 = sigma_0
self.sigma_0_bounds = sigma_0_bounds
self.hyperparameter_sigma_0 = \
Hyperparameter("sigma_0", "numeric", sigma_0_bounds)
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
if Y is None:
K = np.inner(X, X) + self.sigma_0 ** 2
else:
if eval_gradient:
raise ValueError(
"Gradient can only be evaluated when Y is None.")
K = np.inner(X, Y) + self.sigma_0 ** 2
if eval_gradient:
if not self.hyperparameter_sigma_0.fixed:
K_gradient = np.empty((K.shape[0], K.shape[1], 1))
K_gradient[..., 0] = 2 * self.sigma_0 ** 2
return K, K_gradient
else:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return False
def __repr__(self):
return "{0}(sigma_0={1:.3g})".format(
self.__class__.__name__, self.sigma_0)
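# --- Illustrative usage sketch (not part of the original module): the DotProduct
# kernel combined with Exponentiation yields a polynomial kernel
# (sigma_0^2 + x_i . x_j)^degree. Values are arbitrary placeholders.
import numpy as np

_rng = np.random.RandomState(0)
_X_demo = _rng.rand(4, 3)

_linear = DotProduct(sigma_0=1.0)
_quadratic = Exponentiation(_linear, 2)      # equivalently: _linear ** 2

print(np.allclose(_linear(_X_demo), _X_demo.dot(_X_demo.T) + 1.0))            # True
print(np.allclose(_quadratic(_X_demo), (_X_demo.dot(_X_demo.T) + 1.0) ** 2))  # True
print(_linear.is_stationary())               # False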
# adapted from scipy/optimize/optimize.py for functions with 2d output
def _approx_fprime(xk, f, epsilon, args=()):
f0 = f(*((xk,) + args))
grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float)
ei = np.zeros((len(xk), ), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
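# --- Illustrative sanity check (not part of the original module): comparing an
# analytic kernel gradient against the finite-difference approximation above.
# Gradients are taken w.r.t. theta, i.e. the log-transformed hyperparameters.
# The step size, data, and length scale below are arbitrary placeholders.
import numpy as np

_rng = np.random.RandomState(0)
_X_demo = _rng.rand(5, 2)
_kernel_demo = RBF(length_scale=0.8)

_K_demo, _K_demo_grad = _kernel_demo(_X_demo, eval_gradient=True)

def _kernel_of_theta(theta):  # kernel matrix as a function of theta
    return _kernel_demo.clone_with_theta(theta)(_X_demo)

_K_demo_grad_num = _approx_fprime(_kernel_demo.theta, _kernel_of_theta, 1e-8)
print(np.max(np.abs(_K_demo_grad - _K_demo_grad_num)))  # close to 0 (finite-difference error)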
class PairwiseKernel(Kernel):
"""Wrapper for kernels in sklearn.metrics.pairwise.
A thin wrapper around the functionality of the kernels in
sklearn.metrics.pairwise.
Note: Evaluation of eval_gradient is not analytic but numeric and all
kernels support only isotropic distances. The parameter gamma is
considered to be a hyperparameter and may be optimized. The other
kernel parameters are set directly at initialization and are kept
fixed.
Parameters
----------
gamma: float >= 0, default: 1.0
Parameter gamma of the pairwise kernel specified by metric
gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5)
The lower and upper bound on gamma
metric : string, or callable, default: "linear"
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
pairwise_kernels_kwargs : dict, default: None
All entries of this dict (if any) are passed as keyword arguments to
the pairwise kernel function.
"""
def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear",
pairwise_kernels_kwargs=None):
self.gamma = gamma
self.gamma_bounds = gamma_bounds
self.hyperparameter_gamma = \
Hyperparameter("gamma", "numeric", gamma_bounds)
self.metric = metric
if pairwise_kernels_kwargs is not None:
self.pairwise_kernels_kwargs = pairwise_kernels_kwargs
else:
self.pairwise_kernels_kwargs = {}
def __call__(self, X, Y=None, eval_gradient=False):
"""Return the kernel k(X, Y) and optionally its gradient.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Y : array, shape (n_samples_Y, n_features), (optional, default=None)
Right argument of the returned kernel k(X, Y). If None, k(X, X)
            is evaluated instead.
eval_gradient : bool (optional, default=False)
Determines whether the gradient with respect to the kernel
hyperparameter is determined. Only supported when Y is None.
Returns
-------
K : array, shape (n_samples_X, n_samples_Y)
Kernel k(X, Y)
K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
The gradient of the kernel k(X, X) with respect to the
hyperparameter of the kernel. Only returned when eval_gradient
is True.
"""
X = np.atleast_2d(X)
K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma,
filter_params=True,
**self.pairwise_kernels_kwargs)
if eval_gradient:
if self.hyperparameter_gamma.fixed:
return K, np.empty((X.shape[0], X.shape[0], 0))
else:
# approximate gradient numerically
def f(gamma): # helper function
return pairwise_kernels(
X, Y, metric=self.metric, gamma=np.exp(gamma),
filter_params=True, **self.pairwise_kernels_kwargs)
return K, _approx_fprime(self.theta, f, 1e-10)
else:
return K
def diag(self, X):
"""Returns the diagonal of the kernel k(X, X).
The result of this method is identical to np.diag(self(X)); however,
it can be evaluated more efficiently since only the diagonal is
evaluated.
Parameters
----------
X : array, shape (n_samples_X, n_features)
Left argument of the returned kernel k(X, Y)
Returns
-------
K_diag : array, shape (n_samples_X,)
Diagonal of kernel k(X, X)
"""
# We have to fall back to slow way of computing diagonal
return np.apply_along_axis(self, 1, X)[:, 0]
def is_stationary(self):
"""Returns whether the kernel is stationary. """
return self.metric in ["rbf"]
def __repr__(self):
return "{0}(gamma={1}, metric={2})".format(
self.__class__.__name__, self.gamma, self.metric)
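# --- Illustrative sanity check (not part of the original module): with
# metric="rbf", PairwiseKernel matches the RBF kernel defined above when
# gamma = 1 / (2 * length_scale**2). Values are arbitrary placeholders.
import numpy as np

_rng = np.random.RandomState(0)
_X_demo = _rng.rand(5, 3)
_ls_demo = 0.5

_pw = PairwiseKernel(gamma=1.0 / (2 * _ls_demo ** 2), metric="rbf")
_rbf = RBF(length_scale=_ls_demo)
print(np.allclose(_pw(_X_demo), _rbf(_X_demo)))   # True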
| bsd-3-clause |
ast0815/remu | docs/examples/04/do_mcmc.py | 2 | 4292 | from six import print_
import numpy as np
from matplotlib import pyplot as plt
from remu import binning
from remu import plotting
from remu import likelihood
from remu import likelihood_utils
import emcee
with open("../01/reco-binning.yml", 'rt') as f:
reco_binning = binning.yaml.full_load(f)
with open("../01/optimised-truth-binning.yml", 'rt') as f:
truth_binning = binning.yaml.full_load(f)
reco_binning.fill_from_csv_file("../00/real_data.txt")
data = reco_binning.get_entries_as_ndarray()
data_model = likelihood.PoissonData(data)
response_matrix = "../03/response_matrix.npz"
matrix_predictor = likelihood.ResponseMatrixPredictor(response_matrix)
calc = likelihood.LikelihoodCalculator(data_model, matrix_predictor)
truth_binning.fill_from_csv_file("../00/modelA_truth.txt")
modelA = truth_binning.get_values_as_ndarray()
modelA /= np.sum(modelA)
modelA_shape = likelihood.TemplatePredictor([modelA])
calcA = calc.compose(modelA_shape)
samplerA = likelihood_utils.emcee_sampler(calcA)
guessA = likelihood_utils.emcee_initial_guess(calcA)
state = samplerA.run_mcmc(guessA, 100)
chain = samplerA.get_chain(flat=True)
with open("chain_shape.txt", 'w') as f:
print_(chain.shape, file=f)
fig, ax = plt.subplots()
ax.hist(chain[:,0])
ax.set_xlabel("model A weight")
fig.savefig("burn_short.png")
with open("burn_short_tau.txt", 'w') as f:
try:
tau = samplerA.get_autocorr_time()
print_(tau, file=f)
except emcee.autocorr.AutocorrError as e:
print_(e, file=f)
samplerA.reset()
state = samplerA.run_mcmc(guessA, 200*50)
chain = samplerA.get_chain(flat=True)
with open("burn_long_tau.txt", 'w') as f:
try:
tau = samplerA.get_autocorr_time()
print_(tau, file=f)
except emcee.autocorr.AutocorrError as e:
print_(e, file=f)
fig, ax = plt.subplots()
ax.hist(chain[:,0])
ax.set_xlabel("model A weight")
fig.savefig("burn_long.png")
samplerA.reset()
state = samplerA.run_mcmc(state, 100*50)
chain = samplerA.get_chain(flat=True)
with open("tauA.txt", 'w') as f:
try:
tau = samplerA.get_autocorr_time()
print_(tau, file=f)
except emcee.autocorr.AutocorrError as e:
print_(e, file=f)
fig, ax = plt.subplots()
ax.hist(chain[:,0])
ax.set_xlabel("model A weight")
fig.savefig("weightA.png")
truth, _ = modelA_shape(chain)
truth.shape = (np.prod(truth.shape[:-1]), truth.shape[-1])
pltr = plotting.get_plotter(truth_binning)
pltr.plot_array(truth, stack_function=np.median, label="Post. median", hatch=None)
pltr.plot_array(truth, stack_function=0.68, label="Post. 68%", scatter=0)
pltr.legend()
pltr.savefig("truthA.png")
reco, _ = calcA.predictor(chain)
reco.shape = (np.prod(reco.shape[:-1]), reco.shape[-1])
pltr = plotting.get_plotter(reco_binning)
pltr.plot_array(reco, stack_function=np.median, label="Post. median", hatch=None)
pltr.plot_array(reco, stack_function=0.68, label="Post. 68%")
pltr.plot_array(data, label="Data", hatch=None, linewidth=2)
pltr.legend()
pltr.savefig("recoA.png")
truth_binning.reset()
truth_binning.fill_from_csv_file("../00/modelB_truth.txt")
modelB = truth_binning.get_values_as_ndarray()
modelB /= np.sum(modelB)
combined = likelihood.TemplatePredictor([modelA, modelB])
calcC = calc.compose(combined)
samplerC = likelihood_utils.emcee_sampler(calcC)
guessC = likelihood_utils.emcee_initial_guess(calcC)
state = samplerC.run_mcmc(guessC, 200*50)
chain = samplerC.get_chain(flat=True)
with open("combined_chain_shape.txt", 'w') as f:
print_(chain.shape, file=f)
with open("burn_combined_tau.txt", 'w') as f:
try:
tau = samplerC.get_autocorr_time()
print_(tau, file=f)
except emcee.autocorr.AutocorrError as e:
print_(e, file=f)
samplerC.reset()
state = samplerC.run_mcmc(state, 100*50)
chain = samplerC.get_chain(flat=True)
with open("combined_tau.txt", 'w') as f:
try:
tau = samplerC.get_autocorr_time()
print_(tau, file=f)
except emcee.autocorr.AutocorrError as e:
print_(e, file=f)
fig, ax = plt.subplots()
ax.hist2d(chain[:,0], chain[:,1])
ax.set_xlabel("model A weight")
ax.set_ylabel("model B weight")
fig.savefig("combined.png")
fig, ax = plt.subplots()
ax.hist(np.sum(chain, axis=-1))
ax.set_xlabel("model A weight + model B weight")
fig.savefig("total.png")
| mit |
mmottahedi/neuralnilm_prototype | scripts/e326.py | 2 | 6706 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 1000
GRADIENT_STEPS = 100
SEQ_LENGTH = 512
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
# 'hair straighteners',
# 'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 1800, 1800],
min_off_durations=[12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.5,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
input_padding=0,
lag=0
# reshape_target_to_2D=True,
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-3,
learning_rate_changes_by_iteration={
500: 5e-4,
1000: 1e-4
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True
# plotter=MDNPlotter
)
def exp_a(name):
global source
source_dict_copy = deepcopy(source_dict)
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 2, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
# {
# 'type': FeaturePoolLayer,
# 'ds': 2, # number of feature maps to be pooled together
# 'axis': 1, # pool over the time axis
# 'pool_function': T.max
# },
# {
# 'type': BidirectionalRecurrentLayer,
# 'num_units': N,
# 'gradient_steps': GRADIENT_STEPS,
# 'W_in_to_hid': Normal(std=1/sqrt(N)),
# 'nonlinearity': tanh
# },
{
'type': DenseLayer,
'num_units': source.n_outputs,
'W': Normal(std=1/sqrt(N)),
'nonlinearity': T.nnet.softplus
}
# {
# 'type': MixtureDensityLayer,
# 'num_units': source.n_outputs,
# 'num_components': 1,
# 'nonlinearity_mu': T.nnet.softplus
# }
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
chenhh/PySPPortfolio | PySPPortfolio/pysp_portfolio/exp_cvar.py | 1 | 38832 | # -*- coding: utf-8 -*-
"""
Authors: Hung-Hsin Chen <[email protected]>
License: GPL v2
"""
import os
from time import time
import numpy as np
import pandas as pd
from PySPPortfolio.pysp_portfolio import *
from min_cvar_sp import (MinCVaRSPPortfolio, MinCVaRSPPortfolio2 )
from min_cvar_sip import (MinCVaRSIPPortfolio,MinCVaRSIPPortfolio2)
from min_ms_cvar_sp import (MinMSCVaRSPPortfolio,)
from min_ms_cvar_eventsp import (MinMSCVaREventSPPortfolio,)
from min_ms_cvar_avgsp import (MinMSCVaRAvgSPPortfolio,)
from min_cvar_eev import (MinCVaREEVPortfolio,)
from min_cvar_eevip import (MinCVaREEVIPPortfolio,)
from buy_and_hold import (BAHPortfolio,)
from best import (BestMSPortfolio, BestPortfolio)
from datetime import date
def run_min_cvar_sp_simulation(n_stock, win_length, n_scenario=200,
bias=False, scenario_cnt=1, alpha=0.95,
verbose=False):
"""
2nd stage SP simulation
Parameters:
-------------------
    n_stock: integer, number of stocks drawn from EXP_SYMBOLS for the portfolio
    win_length: integer, number of periods (window length) used to estimate scenarios
n_scenario, int, number of scenarios
bias: bool, biased moment estimators or not
scenario_cnt: count of generated scenarios, default = 1
alpha: float, for conditional risk
Returns:
--------------------
reports
"""
t0 = time()
n_stock, win_length, = int(n_stock), int(win_length)
n_scenario, alpha = int(n_scenario), float(alpha)
# getting experiment symbols
symbols = EXP_SYMBOLS[:n_stock]
param = "{}_{}_m{}_w{}_s{}_{}_{}_a{:.2f}".format(
START_DATE.strftime("%Y%m%d"), END_DATE.strftime("%Y%m%d"),
n_stock, win_length, n_scenario, "biased" if bias else "unbiased",
scenario_cnt, alpha)
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_period, n_stock)
risk_rois =roi_panel.loc[:, symbols, 'simple_roi'].T
exp_risk_rois = roi_panel.loc[START_DATE:END_DATE, symbols,
'simple_roi'].T
n_period = exp_risk_rois.shape[0]
risk_free_rois = pd.Series(np.zeros(n_period), index=exp_risk_rois.index)
initial_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_risk_free_wealth = 1e6
instance = MinCVaRSPPortfolio(symbols, risk_rois, risk_free_rois,
initial_risk_wealth, initial_risk_free_wealth,
window_length=win_length, n_scenario=n_scenario,
bias=bias, alpha=alpha, scenario_cnt=scenario_cnt,
verbose=verbose)
reports = instance.run()
file_name = 'min_cvar_sp_{}.pkl'.format(param)
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, 'min_cvar_sp')
if not os.path.exists(file_dir):
os.makedirs(file_dir)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("min cvar sp {} OK, {:.3f} secs".format(param, time()-t0))
return reports
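# Illustrative invocation sketch (not part of the original file). The parameter
# values below are placeholders, and the call only works if the pickled ROI
# panel and the pre-generated scenario files referenced above exist on disk.
if __name__ == "__main__":
    demo_reports = run_min_cvar_sp_simulation(
        n_stock=10, win_length=100, n_scenario=200,
        bias=False, scenario_cnt=1, alpha=0.95, verbose=True)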
def run_min_cvar_sp2_simulation(n_stock, win_length, n_scenario=200,
bias=False, scenario_cnt=1, alpha=0.95,
verbose=False):
"""
2nd stage SP simulation
Parameters:
-------------------
    n_stock: integer, number of stocks drawn from EXP_SYMBOLS for the portfolio
    win_length: integer, number of periods (window length) used to estimate scenarios
n_scenario, int, number of scenarios
bias: bool, biased moment estimators or not
scenario_cnt: count of generated scenarios, default = 1
alpha: float, for conditional risk
Returns:
--------------------
reports
"""
t0 = time()
n_stock, win_length, = int(n_stock), int(win_length)
n_scenario, alpha = int(n_scenario), float(alpha)
# getting experiment symbols
symbols = EXP_SYMBOLS[:n_stock]
param = "{}_{}_m{}_w{}_s{}_{}_{}_a{:.2f}".format(
START_DATE.strftime("%Y%m%d"), END_DATE.strftime("%Y%m%d"),
n_stock, win_length, n_scenario, "biased" if bias else "unbiased",
scenario_cnt, alpha)
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_period, n_stock)
risk_rois =roi_panel.loc[:, symbols, 'simple_roi'].T
exp_risk_rois = roi_panel.loc[START_DATE:END_DATE, symbols,
'simple_roi'].T
n_period = exp_risk_rois.shape[0]
risk_free_rois = pd.Series(np.zeros(n_period), index=exp_risk_rois.index)
initial_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_risk_free_wealth = 1e6
instance = MinCVaRSPPortfolio2(symbols, risk_rois, risk_free_rois,
initial_risk_wealth, initial_risk_free_wealth,
window_length=win_length, n_scenario=n_scenario,
bias=bias, alpha=alpha, scenario_cnt=scenario_cnt,
verbose=verbose)
reports = instance.run()
file_name = 'min_cvar_sp2_{}.pkl'.format(param)
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, 'min_cvar_sp2')
if not os.path.exists(file_dir):
os.makedirs(file_dir)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("min cvar sp2 {} OK, {:.3f} secs".format(param, time()-t0))
return reports
def run_min_cvar_sp2_yearly_simulation(n_stock, win_length, n_scenario=200,
bias=False, scenario_cnt=1, alpha=0.95,
verbose=False, start_date=START_DATE,
end_date=END_DATE):
"""
2nd stage SP simulation
Parameters:
-------------------
    n_stock: integer, number of stocks drawn from EXP_SYMBOLS for the portfolio
    win_length: integer, number of periods (window length) used to estimate scenarios
n_scenario, int, number of scenarios
bias: bool, biased moment estimators or not
scenario_cnt: count of generated scenarios, default = 1
alpha: float, for conditional risk
Returns:
--------------------
reports
"""
t0 = time()
n_stock, win_length, = int(n_stock), int(win_length)
n_scenario, alpha = int(n_scenario), float(alpha)
# getting experiment symbols
symbols = EXP_SYMBOLS[:n_stock]
param = "{}_{}_m{}_w{}_s{}_{}_{}_a{:.2f}".format(
start_date.strftime("%Y%m%d"), end_date.strftime("%Y%m%d"),
n_stock, win_length, n_scenario, "biased" if bias else "unbiased",
scenario_cnt, alpha)
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_period, n_stock)
risk_rois =roi_panel.loc[:, symbols, 'simple_roi'].T
exp_risk_rois = roi_panel.loc[start_date:end_date, symbols,
'simple_roi'].T
n_period = exp_risk_rois.shape[0]
risk_free_rois = pd.Series(np.zeros(n_period), index=exp_risk_rois.index)
initial_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_risk_free_wealth = 1e6
instance = MinCVaRSPPortfolio2(symbols, risk_rois, risk_free_rois,
initial_risk_wealth, initial_risk_free_wealth,
window_length=win_length, n_scenario=n_scenario,
bias=bias, alpha=alpha, scenario_cnt=scenario_cnt,
verbose=verbose, start_date=start_date,
end_date=end_date)
reports = instance.run()
file_name = 'min_cvar_sp2_yearly_{}.pkl'.format(param)
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, 'min_cvar_sp2_yearly')
if not os.path.exists(file_dir):
os.makedirs(file_dir)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("min cvar sp2 yearly {} OK, {:.3f} secs".format(param, time()-t0))
return reports
def run_min_cvar_sip_simulation(max_portfolio_size, window_length,
n_scenario=200, bias=False, scenario_cnt=1,
alpha=0.95, verbose=False):
"""
2nd stage SIP simulation
in the model, all stocks are used as candidate symbols.
Parameters:
-------------------
max_portfolio_size: integer, number of stocks in the portfolio.
window_length: integer, number of periods for estimating scenarios
n_scenario, int, number of scenarios
bias: bool, biased moment estimators or not
scenario_cnt: count of generated scenarios, default = 1
alpha: float, for conditional risk
Returns:
--------------------
reports
"""
t0 = time()
max_portfolio_size = int(max_portfolio_size)
window_length = int(window_length)
n_scenario = int(n_scenario)
alpha = float(alpha)
symbols = EXP_SYMBOLS
n_stock = len(symbols)
param = "{}_{}_all{}_m{}_w{}_s{}_{}_{}_a{:.2f}".format(
START_DATE.strftime("%Y%m%d"), END_DATE.strftime("%Y%m%d"),
len(symbols), max_portfolio_size, window_length, n_scenario,
"biased" if bias else "unbiased", scenario_cnt, alpha)
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_period, n_stock)
risk_rois =roi_panel.loc[:, symbols, 'simple_roi'].T
exp_risk_rois = roi_panel.loc[START_DATE:END_DATE, symbols, 'simple_roi'].T
n_period = exp_risk_rois.shape[0]
risk_free_rois = pd.Series(np.zeros(n_period), index=exp_risk_rois.index)
initial_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_risk_free_wealth = 1e6
instance = MinCVaRSIPPortfolio(symbols, max_portfolio_size,
risk_rois, risk_free_rois,
initial_risk_wealth,
initial_risk_free_wealth,
window_length=window_length,
n_scenario=n_scenario,
bias=bias,
alpha=alpha,
scenario_cnt=scenario_cnt,
verbose=verbose)
reports = instance.run()
file_name = 'min_cvar_sip_{}.pkl'.format(param)
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, 'min_cvar_sip')
if not os.path.exists(file_dir):
os.makedirs(file_dir)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("min cvar sip {} OK, {:.3f} secs".format(param, time()-t0))
return reports
def run_min_cvar_sip2_simulation(max_portfolio_size, window_length,
n_scenario=200, bias=False, scenario_cnt=1,
alpha=0.95, verbose=False):
"""
2nd stage SIP simulation
in the model, all stocks are used as candidate symbols.
Parameters:
-------------------
max_portfolio_size: integer, number of stocks in the portfolio.
window_length: integer, number of periods for estimating scenarios
n_scenario, int, number of scenarios
bias: bool, biased moment estimators or not
scenario_cnt: count of generated scenarios, default = 1
alpha: float, for conditional risk
Returns:
--------------------
reports
"""
t0 = time()
max_portfolio_size = int(max_portfolio_size)
window_length = int(window_length)
n_scenario = int(n_scenario)
alpha = float(alpha)
symbols = EXP_SYMBOLS
n_stock = len(symbols)
param = "{}_{}_all{}_m{}_w{}_s{}_{}_{}_a{:.2f}".format(
START_DATE.strftime("%Y%m%d"), END_DATE.strftime("%Y%m%d"),
len(symbols), max_portfolio_size, window_length, n_scenario,
"biased" if bias else "unbiased", scenario_cnt, alpha)
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_period, n_stock)
risk_rois =roi_panel.loc[:, symbols, 'simple_roi'].T
exp_risk_rois = roi_panel.loc[START_DATE:END_DATE, symbols, 'simple_roi'].T
n_period = exp_risk_rois.shape[0]
risk_free_rois = pd.Series(np.zeros(n_period), index=exp_risk_rois.index)
initial_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_risk_free_wealth = 1e6
instance = MinCVaRSIPPortfolio2(symbols, max_portfolio_size,
risk_rois, risk_free_rois,
initial_risk_wealth,
initial_risk_free_wealth,
window_length=window_length,
n_scenario=n_scenario,
bias=bias,
alpha=alpha,
scenario_cnt=scenario_cnt,
verbose=verbose)
reports = instance.run()
file_name = 'min_cvar_sip2_{}.pkl'.format(param)
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, 'min_cvar_sip2')
if not os.path.exists(file_dir):
os.makedirs(file_dir)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("min cvar sip2 {} OK, {:.3f} secs".format(param, time()-t0))
return reports
def run_min_cvar_sip2_yearly_simulation(max_portfolio_size, window_length,
n_scenario=200, bias=False, scenario_cnt=1,
alpha=0.95, verbose=False,
start_date=START_DATE, end_date=END_DATE):
"""
2nd stage SIP simulation
in the model, all stocks are used as candidate symbols.
Parameters:
-------------------
max_portfolio_size: integer, number of stocks in the portfolio.
window_length: integer, number of periods for estimating scenarios
n_scenario, int, number of scenarios
bias: bool, biased moment estimators or not
scenario_cnt: count of generated scenarios, default = 1
alpha: float, for conditional risk
Returns:
--------------------
reports
"""
t0 = time()
max_portfolio_size = int(max_portfolio_size)
window_length = int(window_length)
n_scenario = int(n_scenario)
alpha = float(alpha)
symbols = EXP_SYMBOLS
n_stock = len(symbols)
param = "{}_{}_all{}_m{}_w{}_s{}_{}_{}_a{:.2f}".format(
start_date.strftime("%Y%m%d"), end_date.strftime("%Y%m%d"),
len(symbols), max_portfolio_size, window_length, n_scenario,
"biased" if bias else "unbiased", scenario_cnt, alpha)
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_period, n_stock)
risk_rois =roi_panel.loc[:, symbols, 'simple_roi'].T
exp_risk_rois = roi_panel.loc[start_date:end_date, symbols, 'simple_roi'].T
n_period = exp_risk_rois.shape[0]
risk_free_rois = pd.Series(np.zeros(n_period), index=exp_risk_rois.index)
initial_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_risk_free_wealth = 1e6
instance = MinCVaRSIPPortfolio2(symbols, max_portfolio_size,
risk_rois, risk_free_rois,
initial_risk_wealth,
initial_risk_free_wealth,
window_length=window_length,
n_scenario=n_scenario,
bias=bias,
alpha=alpha,
scenario_cnt=scenario_cnt,
verbose=verbose,
start_date=start_date,
end_date=end_date)
reports = instance.run()
file_name = 'min_cvar_sip2_yearly_{}.pkl'.format(param)
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, 'min_cvar_sip2_yearly')
if not os.path.exists(file_dir):
os.makedirs(file_dir)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("min cvar sip2 yearly {} OK, {:.3f} secs".format(param, time()-t0))
return reports
def run_min_ms_cvar_sp_simulation(n_stock, win_length, n_scenario=200,
bias=False, scenario_cnt=1,
alphas=[0.5, 0.55, 0.6, 0.65, 0.7, 0.75,
0.8, 0.85, 0.9, 0.95],
verbose=False):
"""
multi-stage SP simulation
Parameters:
-------------------
    n_stock: integer, number of stocks drawn from EXP_SYMBOLS for the portfolio
    window_length: integer, number of periods for estimating scenarios
    n_scenario: integer, number of scenarios
bias: bool, biased moment estimators or not
scenario_cnt: count of generated scenarios, default = 1
    alphas: list of floats, for conditional risk
Returns:
--------------------
reports
"""
t0 = time()
n_stock, win_length, = int(n_stock), int(win_length)
n_scenario = int(n_scenario)
# getting experiment symbols
symbols = EXP_SYMBOLS[:n_stock]
param = "{}_{}_m{}_w{}_s{}_{}_{}".format(
START_DATE.strftime("%Y%m%d"), END_DATE.strftime("%Y%m%d"),
n_stock, win_length, n_scenario, "biased" if bias else "unbiased",
scenario_cnt)
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_period, n_stock)
    risk_rois = roi_panel.loc[:, symbols, 'simple_roi'].T
exp_risk_rois = roi_panel.loc[START_DATE:END_DATE, symbols,
'simple_roi'].T
n_period = exp_risk_rois.shape[0]
risk_free_rois = pd.Series(np.zeros(n_period), index=exp_risk_rois.index)
initial_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_risk_free_wealth = 1e6
print ("min ms cvar sp {} start.".format(param))
t1 = time()
instance = MinMSCVaRSPPortfolio(symbols, risk_rois, risk_free_rois,
initial_risk_wealth, initial_risk_free_wealth,
window_length=win_length, n_scenario=n_scenario,
bias=bias, alphas=alphas, scenario_cnt=scenario_cnt,
verbose=verbose)
print ("min ms cvar sp {} ready to run: {:.3f} secs".format(
param, time() - t1))
reports_dict = instance.run()
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, 'min_ms_cvar_sp')
if not os.path.exists(file_dir):
os.makedirs(file_dir)
for alpha_str, reports in reports_dict.items():
alpha = reports['alpha']
file_name = 'min_ms_cvar_sp_{}_a{:.2f}.pkl'.format(param, alpha)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("ms min cvar sp {}_a{:.2f} OK, {:.3f} secs".format(
param, alpha, time() - t0))
return reports_dict
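# Hedged sketch (not part of the original module): the per-alpha reports pickled by the
# runner above can be read back with pandas; the file name below only mirrors the
# naming pattern used when the reports were written, and pd, os and
# EXP_SP_PORTFOLIO_DIR are the module-level names already used above.
def _example_load_min_ms_cvar_sp_report(param, alpha):
    file_name = 'min_ms_cvar_sp_{}_a{:.2f}.pkl'.format(param, alpha)
    return pd.read_pickle(os.path.join(EXP_SP_PORTFOLIO_DIR, 'min_ms_cvar_sp',
                                       file_name))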
def run_min_ms_cvar_eventsp_simulation(n_stock, win_length, n_scenario=200,
bias=False, scenario_cnt=1, alpha = 0.95,
verbose=False, start_date=date(2005,1,3),
end_date=date(2014,12,31), solver_io="lp",
keepfiles=False):
"""
multi-stage event scenario SP simulation
Parameters:
-------------------
    n_stock: integer, number of stocks drawn from EXP_SYMBOLS for the portfolio
    window_length: integer, number of periods for estimating scenarios
    n_scenario: integer, number of scenarios
bias: bool, biased moment estimators or not
scenario_cnt: count of generated scenarios, default = 1
alpha: float, for conditional risk
Returns:
--------------------
reports
"""
t0 = time()
n_stock, win_length, = int(n_stock), int(win_length)
n_scenario = int(n_scenario)
# getting experiment symbols
symbols = EXP_SYMBOLS[:n_stock]
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_period, n_stock)
risk_rois = roi_panel.loc[:, symbols, 'simple_roi'].T
exp_risk_rois = roi_panel.loc[start_date:end_date, symbols,
'simple_roi'].T
n_period = exp_risk_rois.shape[0]
risk_free_rois = pd.Series(np.zeros(n_period), index=exp_risk_rois.index)
initial_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_risk_free_wealth = 1
param = "{}_{}_m{}_w{}_s{}_{}_{}_a{:.2f}".format(
exp_risk_rois.index[0].strftime("%Y%m%d"),
exp_risk_rois.index[-1].strftime("%Y%m%d"),
n_stock, win_length, n_scenario, "biased" if bias else "unbiased",
scenario_cnt, alpha)
instance = MinMSCVaREventSPPortfolio(symbols, risk_rois, risk_free_rois,
initial_risk_wealth,
initial_risk_free_wealth,
window_length=win_length,
n_scenario=n_scenario,
bias=bias, alpha=alpha,
scenario_cnt=scenario_cnt,
start_date=start_date,
end_date=end_date,
verbose=verbose,
solver_io=solver_io,
keepfiles=keepfiles
)
reports = instance.run()
    print(reports)
prob_name = "min_ms_cvar_eventsp"
file_name = '{}_{}.pkl'.format(prob_name, param)
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, prob_name)
if not os.path.exists(file_dir):
os.makedirs(file_dir)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("{} {} OK, {:.3f} secs".format(prob_name, param, time() - t0))
return reports
def run_min_ms_cvar_avgsp_simulation(n_stock, win_length, n_scenario=200,
bias=False, scenario_cnt=1, alpha = 0.95,
verbose=False):
"""
multi-stage average scenario SP simulation
    the results are independent of alpha
Parameters:
-------------------
    n_stock: integer, number of stocks drawn from EXP_SYMBOLS for the portfolio
    window_length: integer, number of periods for estimating scenarios
    n_scenario: integer, number of scenarios
bias: bool, biased moment estimators or not
scenario_cnt: count of generated scenarios, default = 1
alpha: float, for conditional risk
Returns:
--------------------
reports
"""
t0 = time()
n_stock, win_length, = int(n_stock), int(win_length)
n_scenario = int(n_scenario)
# getting experiment symbols
symbols = EXP_SYMBOLS[:n_stock]
param = "{}_{}_m{}_w{}_s{}_{}_{}_a{:.2f}".format(
START_DATE.strftime("%Y%m%d"), END_DATE.strftime("%Y%m%d"),
n_stock, win_length, n_scenario, "biased" if bias else "unbiased",
scenario_cnt, alpha)
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_period, n_stock)
risk_rois = roi_panel.loc[:, symbols, 'simple_roi'].T
exp_risk_rois = roi_panel.loc[START_DATE:END_DATE, symbols,
'simple_roi'].T
n_period = exp_risk_rois.shape[0]
risk_free_rois = pd.Series(np.zeros(n_period), index=exp_risk_rois.index)
initial_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_risk_free_wealth = 1e6
instance = MinMSCVaRAvgSPPortfolio(symbols, risk_rois, risk_free_rois,
initial_risk_wealth,
initial_risk_free_wealth,
window_length=win_length,
n_scenario=n_scenario,
bias=bias, alpha=alpha,
scenario_cnt=scenario_cnt,
verbose=verbose)
reports = instance.run()
    # the reports contain the scenario simulation results; the true wealth
    # process still needs to be computed from them afterwards
# print reports.keys()
prob_name = "min_ms_cvar_avgsp"
file_name = '{}_{}.pkl'.format(prob_name, param)
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, prob_name)
if not os.path.exists(file_dir):
os.makedirs(file_dir)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("{} {} OK, {:.3f} secs".format(prob_name, param, time() - t0))
return reports
def run_min_cvar_eev_simulation(n_stock, win_length, n_scenario=200,
bias=False, scenario_cnt=1, alpha=0.95,
verbose=False):
"""
    2nd stage expectation of expected value (EEV) simulation
Parameters:
-------------------
    n_stock: integer, number of stocks drawn from EXP_SYMBOLS for the portfolio
    window_length: integer, number of periods for estimating scenarios
    n_scenario: integer, number of scenarios
bias: bool, biased moment estimators or not
scenario_cnt: count of generated scenarios, default = 1
alpha: float, for conditional risk
Returns:
--------------------
reports
"""
t0 = time()
n_stock, win_length, = int(n_stock), int(win_length)
n_scenario, alpha = int(n_scenario), float(alpha)
# getting experiment symbols
symbols = EXP_SYMBOLS[:n_stock]
param = "{}_{}_m{}_w{}_s{}_{}_{}_a{:.2f}".format(
START_DATE.strftime("%Y%m%d"), END_DATE.strftime("%Y%m%d"),
n_stock, win_length, n_scenario, "biased" if bias else "unbiased",
scenario_cnt, alpha)
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_period, n_stock)
    risk_rois = roi_panel.loc[:, symbols, 'simple_roi'].T
exp_risk_rois = roi_panel.loc[START_DATE:END_DATE, symbols,
'simple_roi'].T
n_period = exp_risk_rois.shape[0]
risk_free_rois = pd.Series(np.zeros(n_period), index=exp_risk_rois.index)
initial_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_risk_free_wealth = 1e6
instance = MinCVaREEVPortfolio(symbols, risk_rois, risk_free_rois,
initial_risk_wealth, initial_risk_free_wealth,
window_length=win_length, n_scenario=n_scenario,
bias=bias, alpha=alpha, scenario_cnt=scenario_cnt,
verbose=verbose)
reports = instance.run()
prob_name = "min_cvar_eev"
file_name = '{}_{}.pkl'.format(prob_name, param)
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, prob_name)
if not os.path.exists(file_dir):
os.makedirs(file_dir)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("{} {} OK, {:.3f} secs".format(prob_name, param, time()-t0))
return reports
def run_min_cvar_eevip_simulation(max_portfolio_size, window_length,
n_scenario=200,
bias=False, scenario_cnt=1, alpha=0.95,
verbose=False):
"""
    2nd stage expectation of expected value (EEV) simulation with portfolio-size limit
Parameters:
-------------------
    max_portfolio_size: integer, maximum number of stocks in the portfolio
    window_length: integer, number of periods for estimating scenarios
    n_scenario: integer, number of scenarios
bias: bool, biased moment estimators or not
scenario_cnt: count of generated scenarios, default = 1
alpha: float, for conditional risk
Returns:
--------------------
reports
"""
t0 = time()
max_portfolio_size = int(max_portfolio_size)
window_length = int(window_length)
n_scenario = int(n_scenario)
alpha = float(alpha)
symbols = EXP_SYMBOLS
n_stock = len(symbols)
param = "{}_{}_all{}_m{}_w{}_s{}_{}_{}_a{:.2f}".format(
START_DATE.strftime("%Y%m%d"), END_DATE.strftime("%Y%m%d"),
len(symbols), max_portfolio_size, window_length, n_scenario,
"biased" if bias else "unbiased", scenario_cnt, alpha)
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_period, n_stock)
    risk_rois = roi_panel.loc[:, symbols, 'simple_roi'].T
exp_risk_rois = roi_panel.loc[START_DATE:END_DATE, symbols, 'simple_roi'].T
n_period = exp_risk_rois.shape[0]
risk_free_rois = pd.Series(np.zeros(n_period), index=exp_risk_rois.index)
initial_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_risk_free_wealth = 1e6
instance = MinCVaREEVIPPortfolio(symbols, max_portfolio_size,
risk_rois, risk_free_rois,
initial_risk_wealth,
initial_risk_free_wealth,
window_length=window_length,
n_scenario=n_scenario,
bias=bias,
alpha=alpha,
scenario_cnt=scenario_cnt,
verbose=verbose,
)
reports = instance.run()
prob_name = "min_cvar_eevip"
file_name = '{}_{}.pkl'.format(prob_name, param)
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, prob_name)
if not os.path.exists(file_dir):
os.makedirs(file_dir)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("{} {} OK, {:.3f} secs".format(prob_name, param, time()-t0))
return reports
def run_bah_simulation(n_stock, verbose=False):
"""
The Buy-And-Hold (BAH) strategy,
"""
t0 = time()
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
param = "{}_{}_m{}".format(
START_DATE.strftime("%Y%m%d"), END_DATE.strftime("%Y%m%d"),
n_stock)
symbols = EXP_SYMBOLS[:n_stock]
n_stock = len(symbols)
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_period, n_stock)
exp_risk_rois = roi_panel.loc[START_DATE:END_DATE, symbols,
'simple_roi'].T
n_exp_period = exp_risk_rois.shape[0]
exp_risk_free_rois = pd.Series(np.zeros(n_exp_period),
index=exp_risk_rois.index)
allocated_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_wealth = 1e6
instance = BAHPortfolio(symbols, exp_risk_rois, exp_risk_free_rois,
allocated_risk_wealth, initial_wealth,
start_date=START_DATE, end_date=END_DATE)
reports = instance.run()
file_name = 'bah_{}.pkl'.format(param)
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, 'bah')
if not os.path.exists(file_dir):
os.makedirs(file_dir)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("BAH {} OK, {:.3f} secs".format(param, time()-t0))
def run_best_simulation(n_stock, verbose=False):
"""
The best stage-wise strategy,
"""
t0 = time()
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
param = "{}_{}_m{}".format(
START_DATE.strftime("%Y%m%d"), END_DATE.strftime("%Y%m%d"),
n_stock)
symbols = EXP_SYMBOLS[:n_stock]
n_stock = len(symbols)
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_exp_period, n_stock)
exp_risk_rois = roi_panel.loc[START_DATE:END_DATE, symbols,
'simple_roi'].T
n_exp_period = exp_risk_rois.shape[0]
# shape: (n_exp_period, )
exp_risk_free_rois = pd.Series(np.zeros(n_exp_period),
index=exp_risk_rois.index)
allocated_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_wealth = 1e6
instance = BestPortfolio(symbols, exp_risk_rois, exp_risk_free_rois,
allocated_risk_wealth, initial_wealth,
start_date=START_DATE, end_date=END_DATE)
reports = instance.run()
file_name = 'best_{}.pkl'.format(param)
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, 'best')
if not os.path.exists(file_dir):
os.makedirs(file_dir)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("best {} OK, {:.3f} secs".format(param, time()-t0))
def run_best_ms_simulation(n_stock, verbose=False):
"""
The best multi-stage strategy,
"""
t0 = time()
# read rois panel
roi_path = os.path.join(SYMBOLS_PKL_DIR,
'TAIEX_2005_largest50cap_panel.pkl')
if not os.path.exists(roi_path):
raise ValueError("{} roi panel does not exist.".format(roi_path))
param = "{}_{}_m{}".format(
START_DATE.strftime("%Y%m%d"), END_DATE.strftime("%Y%m%d"),
n_stock)
symbols = EXP_SYMBOLS[:n_stock]
n_stock = len(symbols)
# shape: (n_period, n_stock, {'simple_roi', 'close_price'})
roi_panel = pd.read_pickle(roi_path)
# shape: (n_period, n_stock)
exp_risk_rois = roi_panel.loc[START_DATE:END_DATE, symbols,
'simple_roi'].T
n_exp_period = exp_risk_rois.shape[0]
exp_risk_free_rois = pd.Series(np.zeros(n_exp_period),
index=exp_risk_rois.index)
allocated_risk_wealth = pd.Series(np.zeros(n_stock), index=symbols)
initial_wealth = 1e6
instance = BestMSPortfolio(symbols, exp_risk_rois, exp_risk_free_rois,
allocated_risk_wealth, initial_wealth,
start_date=START_DATE, end_date=END_DATE)
reports = instance.run()
file_name = 'best_ms_{}.pkl'.format(param)
file_dir = os.path.join(EXP_SP_PORTFOLIO_DIR, 'best_ms')
if not os.path.exists(file_dir):
os.makedirs(file_dir)
pd.to_pickle(reports, os.path.join(file_dir, file_name))
print ("best_ms {} OK, {:.3f} secs".format(param, time()-t0))
if __name__ == '__main__':
pass
# for n_stock in xrange(5, 50+5, 5):
# run_bah_simulation(n_stock)
# params = [
# (5, 100 ,0.5),
# # (10, 50, 0.7),
# # (15, 80, 0.5), (20, 110, 0.5),
# # (25, 100, 0.55), (30, 110, 0.6),
# # (35, 110, 0.5), (40, 110, 0.5), (45, 120, 0.55),
# # (50 120, 0.5)
# ]
#
# for m, w, a in params:
# for cnt in xrange(1, 3+1):
# try:
# run_min_cvar_eev_simulation(m, w, scenario_cnt=cnt, alpha=a,
# verbose=True)
# except ValueError as e:
# print e
# continue
# run_min_cvar_eev_simulation(10, 220, scenario_cnt=1, alpha=0.95)
# for m in xrange(5, 55, 5):
# run_bah_simulation(m)
# run_min_cvar_sip2_simulation(10, 190, scenario_cnt=1, alpha=0.95,
# verbose=True)
# run_min_ms_cvar_avgsp_simulation(10, 200, scenario_cnt=1, alpha=0.9)
run_min_ms_cvar_eventsp_simulation(15, 100, n_scenario=200,
alpha=0.65,
start_date=date(2005, 1, 3),
end_date=date(2005, 1, 31),
solver_io="lp",
keepfiles=False)
# analysis_results("min_cvar_sp", 5, 50, n_scenario=200,
# bias=False, scenario_cnt=1, alpha=0.95)
# run_min_ms_cvar_sp_simulation(10, 220, n_scenario=200,
# bias=False, scenario_cnt=1,
# alphas=[0.5, 0.55],
# # alphas=[0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8,
# # 0.85, 0.9, 0.95],
# verbose=False)
# for n_stock in xrange(15, 50+5, 5):
# run_best_simulation(10)
# run_best_ms_simulation(5)
# run_min_cvar_eevip_simulation(10, 190)
| gpl-3.0 |
ankurankan/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
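# A small follow-up check (not part of the original example; it reuses idx and idx_r
# from above): compare the support recovered by the CV estimator with the true support.
recovered_support = set(idx_r)
true_support = set(idx)
print("CV estimate recovered %d of %d true atoms"
      % (len(recovered_support & true_support), len(true_support)))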
plt.show()
| bsd-3-clause |
davidbuzz/ardupilot | Tools/LogAnalyzer/tests/TestOptFlow.py | 32 | 14968 | from LogAnalyzer import Test,TestResult
import DataflashLog
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
class TestFlow(Test):
'''test optical flow sensor scale factor calibration'''
#
    # Use the following procedure to log the calibration data. It is assumed that the optical flow sensor has been
    # correctly aligned, is focused and the test is performed over a textured surface with adequate lighting.
    # Note that the strobing effect from non-incandescent artificial lighting can produce poor optical flow measurements.
#
# 1) Set LOG_DISARMED and FLOW_TYPE to 10 and verify that ATT and OF messages are being logged onboard
# 2) Place on level ground, apply power and wait for EKF to complete attitude alignment
# 3) Keeping the copter level, lift it to shoulder height and rock between +-20 and +-30 degrees
# in roll about an axis that passes through the flow sensor lens assembly. The time taken to rotate from
# maximum left roll to maximum right roll should be about 1 second.
# 4) Repeat 3) about the pitch axis
# 5) Holding the copter level, lower it to the ground and remove power
# 6) Transfer the logfile from the sdcard.
# 7) Open a terminal and cd to the ardupilot/Tools/LogAnalyzer directory
# 8) Enter to run the analysis 'python LogAnalyzer.py <log file name including full path>'
# 9) Check the OpticalFlow test status printed to the screen. The analysis plots are saved to
# flow_calibration.pdf and the recommended scale factors to flow_calibration.param
def __init__(self):
Test.__init__(self)
self.name = "OpticalFlow"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
def FAIL():
self.result.status = TestResult.StatusType.FAIL
def WARN():
if self.result.status != TestResult.StatusType.FAIL:
self.result.status = TestResult.StatusType.WARN
try:
# tuning parameters used by the algorithm
tilt_threshold = 15 # roll and pitch threshold used to start and stop calibration (deg)
quality_threshold = 124 # minimum flow quality required for data to be used by the curve fit (N/A)
min_rate_threshold = 0.0 # if the gyro rate is less than this, the data will not be used by the curve fit (rad/sec)
            max_rate_threshold = 2.0 # if the gyro rate is greater than this, the data will not be used by the curve fit (rad/sec)
param_std_threshold = 5.0 # maximum allowable 1-std uncertainty in scaling parameter (scale factor * 1000)
param_abs_threshold = 200 # max/min allowable scale factor parameter. Values of FLOW_FXSCALER and FLOW_FYSCALER outside the range of +-param_abs_threshold indicate a sensor configuration problem.
min_num_points = 100 # minimum number of points required for a curve fit - this is necessary, but not sufficient condition - the standard deviation estimate of the fit gradient is also important.
# get the existing scale parameters
flow_fxscaler = logdata.parameters["FLOW_FXSCALER"]
flow_fyscaler = logdata.parameters["FLOW_FYSCALER"]
# load required optical flow data
if "OF" in logdata.channels:
flowX = np.zeros(len(logdata.channels["OF"]["flowX"].listData))
for i in range(len(logdata.channels["OF"]["flowX"].listData)):
(line, flowX[i]) = logdata.channels["OF"]["flowX"].listData[i]
bodyX = np.zeros(len(logdata.channels["OF"]["bodyX"].listData))
for i in range(len(logdata.channels["OF"]["bodyX"].listData)):
(line, bodyX[i]) = logdata.channels["OF"]["bodyX"].listData[i]
flowY = np.zeros(len(logdata.channels["OF"]["flowY"].listData))
for i in range(len(logdata.channels["OF"]["flowY"].listData)):
(line, flowY[i]) = logdata.channels["OF"]["flowY"].listData[i]
bodyY = np.zeros(len(logdata.channels["OF"]["bodyY"].listData))
for i in range(len(logdata.channels["OF"]["bodyY"].listData)):
(line, bodyY[i]) = logdata.channels["OF"]["bodyY"].listData[i]
flow_time_us = np.zeros(len(logdata.channels["OF"]["TimeUS"].listData))
for i in range(len(logdata.channels["OF"]["TimeUS"].listData)):
(line, flow_time_us[i]) = logdata.channels["OF"]["TimeUS"].listData[i]
flow_qual = np.zeros(len(logdata.channels["OF"]["Qual"].listData))
for i in range(len(logdata.channels["OF"]["Qual"].listData)):
(line, flow_qual[i]) = logdata.channels["OF"]["Qual"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no optical flow data\n"
return
# load required attitude data
if "ATT" in logdata.channels:
Roll = np.zeros(len(logdata.channels["ATT"]["Roll"].listData))
for i in range(len(logdata.channels["ATT"]["Roll"].listData)):
(line, Roll[i]) = logdata.channels["ATT"]["Roll"].listData[i]
Pitch = np.zeros(len(logdata.channels["ATT"]["Pitch"].listData))
for i in range(len(logdata.channels["ATT"]["Pitch"].listData)):
(line, Pitch[i]) = logdata.channels["ATT"]["Pitch"].listData[i]
att_time_us = np.zeros(len(logdata.channels["ATT"]["TimeUS"].listData))
for i in range(len(logdata.channels["ATT"]["TimeUS"].listData)):
(line, att_time_us[i]) = logdata.channels["ATT"]["TimeUS"].listData[i]
else:
FAIL()
self.result.statusMessage = "FAIL: no attitude data\n"
return
# calculate the start time for the roll calibration
startTime = int(0)
startRollIndex = int(0)
for i in range(len(Roll)):
if abs(Roll[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startRollIndex = i
break
# calculate the end time for the roll calibration
endTime = int(0)
endRollIndex = int(0)
for i in range(len(Roll)-1,-1,-1):
if abs(Roll[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endRollIndex = i
break
# check we have enough roll data points
if (endRollIndex - startRollIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient roll data pointsa\n"
return
# resample roll test data excluding data before first movement and after last movement
# also exclude data where there is insufficient angular rate
flowX_resampled = []
bodyX_resampled = []
flowX_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startRollIndex) and (i <= endRollIndex) and (abs(bodyX[i]) > min_rate_threshold) and (abs(bodyX[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowX_resampled.append(flowX[i])
bodyX_resampled.append(bodyX[i])
flowX_time_us_resampled.append(flow_time_us[i])
# calculate the start time for the pitch calibration
startTime = 0
startPitchIndex = int(0)
for i in range(len(Pitch)):
if abs(Pitch[i]) > tilt_threshold:
startTime = att_time_us[i]
break
for i in range(len(flow_time_us)):
if flow_time_us[i] > startTime:
startPitchIndex = i
break
# calculate the end time for the pitch calibration
endTime = 0
endPitchIndex = int(0)
for i in range(len(Pitch)-1,-1,-1):
if abs(Pitch[i]) > tilt_threshold:
endTime = att_time_us[i]
break
for i in range(len(flow_time_us)-1,-1,-1):
if flow_time_us[i] < endTime:
endPitchIndex = i
break
# check we have enough pitch data points
if (endPitchIndex - startPitchIndex <= min_num_points):
FAIL()
self.result.statusMessage = "FAIL: insufficient pitch data pointsa\n"
return
# resample pitch test data excluding data before first movement and after last movement
# also exclude data where there is insufficient or too much angular rate
flowY_resampled = []
bodyY_resampled = []
flowY_time_us_resampled = []
for i in range(len(Roll)):
if (i >= startPitchIndex) and (i <= endPitchIndex) and (abs(bodyY[i]) > min_rate_threshold) and (abs(bodyY[i]) < max_rate_threshold) and (flow_qual[i] > quality_threshold):
flowY_resampled.append(flowY[i])
bodyY_resampled.append(bodyY[i])
flowY_time_us_resampled.append(flow_time_us[i])
# fit a straight line to the flow vs body rate data and calculate the scale factor parameter required to achieve a slope of 1
coef_flow_x , cov_x = np.polyfit(bodyX_resampled,flowX_resampled,1,rcond=None, full=False, w=None, cov=True)
coef_flow_y , cov_y = np.polyfit(bodyY_resampled,flowY_resampled,1,rcond=None, full=False, w=None, cov=True)
            # taking the existing scale factor parameters into account, calculate the parameter values required to achieve a unity slope
flow_fxscaler_new = int(1000 * (((1 + 0.001 * float(flow_fxscaler))/coef_flow_x[0] - 1)))
flow_fyscaler_new = int(1000 * (((1 + 0.001 * float(flow_fyscaler))/coef_flow_y[0] - 1)))
# Do a sanity check on the scale factor variance
if sqrt(cov_x[0][0]) > param_std_threshold or sqrt(cov_y[0][0]) > param_std_threshold:
FAIL()
self.result.statusMessage = "FAIL: inaccurate fit - poor quality or insufficient data\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# Do a sanity check on the scale factors
if abs(flow_fxscaler_new) > param_abs_threshold or abs(flow_fyscaler_new) > param_abs_threshold:
FAIL()
self.result.statusMessage = "FAIL: required scale factors are excessive\nFLOW_FXSCALER=%i\nFLOW_FYSCALER=%i\n" % (flow_fxscaler,flow_fyscaler)
# display recommended scale factors
self.result.statusMessage = "Set FLOW_FXSCALER to %i\nSet FLOW_FYSCALER to %i\n\nCal plots saved to flow_calibration.pdf\nCal parameters saved to flow_calibration.param\n\nFLOW_FXSCALER 1STD = %u\nFLOW_FYSCALER 1STD = %u\n" % (flow_fxscaler_new,flow_fyscaler_new,round(1000*sqrt(cov_x[0][0])),round(1000*sqrt(cov_y[0][0])))
# calculate fit display data
body_rate_display = [-max_rate_threshold,max_rate_threshold]
fit_coef_x = np.poly1d(coef_flow_x)
flowX_display = fit_coef_x(body_rate_display)
fit_coef_y = np.poly1d(coef_flow_y)
flowY_display = fit_coef_y(body_rate_display)
# plot and save calibration test points to PDF
from matplotlib.backends.backend_pdf import PdfPages
output_plot_filename = "flow_calibration.pdf"
pp = PdfPages(output_plot_filename)
plt.figure(1,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(bodyX_resampled,flowX_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowX_display,'r',linewidth=2.5,label="linear fit")
plt.title('X axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(bodyY_resampled,flowY_resampled,'b', linestyle=' ', marker='o',label="test points")
plt.plot(body_rate_display,flowY_display,'r',linewidth=2.5,label="linear fit")
plt.title('Y axis flow rate vs gyro rate')
plt.ylabel('flow rate (rad/s)')
plt.xlabel('gyro rate (rad/sec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
plt.figure(2,figsize=(20,13))
plt.subplot(2,1,1)
plt.plot(flow_time_us,flowX,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyX,'r',label="gyro rate - all")
plt.plot(flowX_time_us_resampled,flowX_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowX_time_us_resampled,bodyX_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('X axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
# draw plots
plt.subplot(2,1,2)
plt.plot(flow_time_us,flowY,'b',label="flow rate - all")
plt.plot(flow_time_us,bodyY,'r',label="gyro rate - all")
plt.plot(flowY_time_us_resampled,flowY_resampled,'c', linestyle=' ', marker='o',label="flow rate - used")
plt.plot(flowY_time_us_resampled,bodyY_resampled,'m', linestyle=' ', marker='o',label="gyro rate - used")
plt.title('Y axis flow and body rate vs time')
plt.ylabel('rate (rad/s)')
plt.xlabel('time (usec)')
plt.grid()
plt.legend(loc='upper left')
pp.savefig()
# close the pdf file
pp.close()
# close all figures
plt.close("all")
# write correction parameters to file
test_results_filename = "flow_calibration.param"
file = open(test_results_filename,"w")
file.write("FLOW_FXSCALER"+" "+str(flow_fxscaler_new)+"\n")
file.write("FLOW_FYSCALER"+" "+str(flow_fyscaler_new)+"\n")
file.close()
except KeyError as e:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = str(e) + ' not found'
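# Hedged illustration (not part of the original test): a standalone restatement of the
# scale-factor update used in run() above. Given the fitted slope of flow rate versus
# gyro rate and the existing FLOW_F*SCALER value, the new parameter is the value that
# brings the rescaled slope back to one.
def _example_scale_factor_update(fit_slope, existing_scaler):
    return int(1000 * (((1 + 0.001 * float(existing_scaler)) / fit_slope) - 1))
# e.g. _example_scale_factor_update(1.05, 0) suggests a scaler of roughly -47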
| gpl-3.0 |
derekjchow/models | research/object_detection/dataset_tools/oid_tfrecord_creation_test.py | 3 | 7258 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for oid_tfrecord_creation.py."""
import pandas as pd
import tensorflow as tf
from object_detection.dataset_tools import oid_tfrecord_creation
def create_test_data():
data = {
'ImageID': ['i1', 'i1', 'i1', 'i1', 'i1', 'i2', 'i2'],
'LabelName': ['a', 'a', 'b', 'b', 'c', 'b', 'c'],
'YMin': [0.3, 0.6, 0.8, 0.1, None, 0.0, 0.0],
'XMin': [0.1, 0.3, 0.7, 0.0, None, 0.1, 0.1],
'XMax': [0.2, 0.3, 0.8, 0.5, None, 0.9, 0.9],
'YMax': [0.3, 0.6, 1, 0.8, None, 0.8, 0.8],
'IsOccluded': [0, 1, 1, 0, None, 0, 0],
'IsTruncated': [0, 0, 0, 1, None, 0, 0],
'IsGroupOf': [0, 0, 0, 0, None, 0, 1],
'IsDepiction': [1, 0, 0, 0, None, 0, 0],
'ConfidenceImageLabel': [None, None, None, None, 0, None, None],
}
df = pd.DataFrame(data=data)
label_map = {'a': 0, 'b': 1, 'c': 2}
return label_map, df
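# Note on the fixture above: the row for label 'c' on image 'i1' carries no box
# coordinates and only a ConfidenceImageLabel, so the tests below expect it to surface
# as an image-level label (image/class/label) rather than as a bounding box.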
class TfExampleFromAnnotationsDataFrameTests(tf.test.TestCase):
def test_simple(self):
label_map, df = create_test_data()
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
df[df.ImageID == 'i1'], label_map, 'encoded_image_test')
self.assertProtoEquals(
"""
features {
feature {
key: "image/encoded"
value { bytes_list { value: "encoded_image_test" } } }
feature {
key: "image/filename"
value { bytes_list { value: "i1.jpg" } } }
feature {
key: "image/object/bbox/ymin"
value { float_list { value: [0.3, 0.6, 0.8, 0.1] } } }
feature {
key: "image/object/bbox/xmin"
value { float_list { value: [0.1, 0.3, 0.7, 0.0] } } }
feature {
key: "image/object/bbox/ymax"
value { float_list { value: [0.3, 0.6, 1.0, 0.8] } } }
feature {
key: "image/object/bbox/xmax"
value { float_list { value: [0.2, 0.3, 0.8, 0.5] } } }
feature {
key: "image/object/class/label"
value { int64_list { value: [0, 0, 1, 1] } } }
feature {
key: "image/object/class/text"
value { bytes_list { value: ["a", "a", "b", "b"] } } }
feature {
key: "image/source_id"
value { bytes_list { value: "i1" } } }
feature {
key: "image/object/depiction"
value { int64_list { value: [1, 0, 0, 0] } } }
feature {
key: "image/object/group_of"
value { int64_list { value: [0, 0, 0, 0] } } }
feature {
key: "image/object/occluded"
value { int64_list { value: [0, 1, 1, 0] } } }
feature {
key: "image/object/truncated"
value { int64_list { value: [0, 0, 0, 1] } } }
feature {
key: "image/class/label"
value { int64_list { value: [2] } } }
feature {
key: "image/class/text"
value { bytes_list { value: ["c"] } } } }
""", tf_example)
def test_no_attributes(self):
label_map, df = create_test_data()
del df['IsDepiction']
del df['IsGroupOf']
del df['IsOccluded']
del df['IsTruncated']
del df['ConfidenceImageLabel']
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
df[df.ImageID == 'i2'], label_map, 'encoded_image_test')
self.assertProtoEquals("""
features {
feature {
key: "image/encoded"
value { bytes_list { value: "encoded_image_test" } } }
feature {
key: "image/filename"
value { bytes_list { value: "i2.jpg" } } }
feature {
key: "image/object/bbox/ymin"
value { float_list { value: [0.0, 0.0] } } }
feature {
key: "image/object/bbox/xmin"
value { float_list { value: [0.1, 0.1] } } }
feature {
key: "image/object/bbox/ymax"
value { float_list { value: [0.8, 0.8] } } }
feature {
key: "image/object/bbox/xmax"
value { float_list { value: [0.9, 0.9] } } }
feature {
key: "image/object/class/label"
value { int64_list { value: [1, 2] } } }
feature {
key: "image/object/class/text"
value { bytes_list { value: ["b", "c"] } } }
feature {
key: "image/source_id"
value { bytes_list { value: "i2" } } } }
""", tf_example)
def test_label_filtering(self):
label_map, df = create_test_data()
label_map = {'a': 0}
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
df[df.ImageID == 'i1'], label_map, 'encoded_image_test')
self.assertProtoEquals(
"""
features {
feature {
key: "image/encoded"
value { bytes_list { value: "encoded_image_test" } } }
feature {
key: "image/filename"
value { bytes_list { value: "i1.jpg" } } }
feature {
key: "image/object/bbox/ymin"
value { float_list { value: [0.3, 0.6] } } }
feature {
key: "image/object/bbox/xmin"
value { float_list { value: [0.1, 0.3] } } }
feature {
key: "image/object/bbox/ymax"
value { float_list { value: [0.3, 0.6] } } }
feature {
key: "image/object/bbox/xmax"
value { float_list { value: [0.2, 0.3] } } }
feature {
key: "image/object/class/label"
value { int64_list { value: [0, 0] } } }
feature {
key: "image/object/class/text"
value { bytes_list { value: ["a", "a"] } } }
feature {
key: "image/source_id"
value { bytes_list { value: "i1" } } }
feature {
key: "image/object/depiction"
value { int64_list { value: [1, 0] } } }
feature {
key: "image/object/group_of"
value { int64_list { value: [0, 0] } } }
feature {
key: "image/object/occluded"
value { int64_list { value: [0, 1] } } }
feature {
key: "image/object/truncated"
value { int64_list { value: [0, 0] } } }
feature {
key: "image/class/label"
value { int64_list { } } }
feature {
key: "image/class/text"
value { bytes_list { } } } }
""", tf_example)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
codez266/codez266.github.io | markdown_generator/talks.py | 2 | 4111 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
if len(str(item.location)) > 3:
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
| mit |
lbishal/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 108 | 2026 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
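# A small follow-up sketch (not part of the original example): the fitted estimator can
# also score points; score_samples returns the log-density under the model, so larger
# values mean a point looks more like the training digits in the projected PCA space.
log_dens = kde.score_samples(data[:5])
print("log-density of the first five projected digits:", log_dens)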
| bsd-3-clause |
underbluewaters/marinemap | lingcod/spacing/models.py | 3 | 17201 | from django.contrib.gis.db import models
from django.contrib.gis import geos
from django.contrib.gis.measure import *
from django.core.files import File
from django.db import connection
from django.conf import settings
from lingcod.unit_converter.models import length_in_display_units, area_in_display_units
from exceptions import AttributeError
import os
import tempfile
import pickle
import datetime
import networkx as nx
### Display Methods ###
# These methods are used by the spacing views
def kml_doc_from_geometry_list(geom_list, template='general.kml'):
out_dict = {}
placemarks = []
for geom in geom_list:
placemarks.append( kml_placemark_from_geom(geom) )
out_dict['placemarks'] = placemarks
from django.template import Context, Template
from django.template.loader import get_template
t = get_template(template)
response = t.render(Context({ 'kml': out_dict }))
return response
def kml_doc_from_queryset(qs, template='general.kml'):
dict = {}
placemarks = []
for item in qs:
placemarks.append( kml_placemark(item) )
dict['placemarks'] = placemarks
from django.template import Context, Template
from django.template.loader import get_template
t = get_template(template)
response = t.render(Context({ 'kml': dict }))
return response
def kml_placemark_from_geom(geom, styleUrl='#default'):
geom.transform(settings.GEOMETRY_CLIENT_SRID)
style = '<styleUrl>%s</styleUrl>' % styleUrl
return_kml = '<Placemark>%s%s</Placemark>' % (style,geom.kml)
return return_kml
def kml_placemark(qs_item, styleUrl='#default', geo_field='geometry'):
geom = qs_item.__getattribute__(geo_field)
geom.transform(4326)
try:
name = qs_item.name
except AttributeError:
name = qs_item.model.__name__
name = '<Name>%s</Name>' % name
style = '<styleUrl>%s</styleUrl>' % styleUrl
return_kml = '<Placemark>%s%s%s</Placemark>' % (name,style,geom.kml)
return return_kml
### End Display Methods ###
class PickledGraph(models.Model):
"""
This model gives us someplace to put our pickle. No, really that's
what it does. There should only be one record in this model at any
given time. This model just stores THE graph.
"""
pickled_graph = models.FileField(upload_to='spacing/pickled_graphs')
date_modified = models.DateTimeField(auto_now_add=True,auto_now=True,default=datetime.datetime.now())
@property
def graph(self):
f = open(self.pickled_graph.path,'r')
graph = pickle.load(f)
return graph
def create_pickled_graph(verbose=False):
# get rid of existing
PickledGraph.objects.all().delete()
tf = tempfile.NamedTemporaryFile()
graph = nx.Graph()
graph = add_land_to_graph(graph,verbose=verbose)
# add spacing points to graph
points = [ sp.geometry for sp in SpacingPoint.objects.all() ]
graph = add_points_to_graph(points,graph)
pickle.dump(graph, tf)
pg = PickledGraph()
pg.pickled_graph = File(tf)
pg.save()
tf.close()
return graph
class Land(models.Model):
"""
    This is where a simplified polygon representation of land gets stored. The greater the number of vertices, the slower the distance analysis,
    so don't get too fancy. Land can be made up of multiple polygons but each polygon gets its own single polygon record.
"""
name = models.CharField(max_length=200, null=True, blank=True)
geometry = models.PolygonField(srid=settings.GEOMETRY_DB_SRID,null=True, blank=True)
date_modified = models.DateTimeField(auto_now_add=True,auto_now=True,default=datetime.datetime.now())
objects = models.GeoManager()
def add_hull_nodes_to_graph(self, graph):
"""
This is for only adding the nodes of the convex hull to the graph. I don't think this will be used in most cases but,
in some cases, it could be effective at reducing the number of nodes in the graph and speeding things up.
"""
poly = self.geometry#.buffer(5).simplify(1)
graph.add_nodes_from([geos.Point(p) for p in poly.convex_hull.shell])
return graph
def add_nodes_to_graph(self, graph):
poly = self.geometry
graph.add_nodes_from([geos.Point(p) for p in poly.shell])
return graph
def create_hull(self):
"""
probably don't need this method in the long run because we won't really need to keep the hull
"""
hull, created = Hull.objects.get_or_create(land=self)
hull.geometry = self.geometry.convex_hull
hull.save()
def geometry_kml(self):
geom = self.geometry
geom.transform(4326)
return geom.kml
def kml(self):
from django.template import Context, Template
from django.template.loader import get_template
t = get_template('land.kml')
response = t.render(Context({ 'land': self }))
return response
def simplify(self, tolerance=500):
self.geometry = self.geometry.simplify(tolerance=tolerance, preserve_topology=True)
self.geometry = geos.Polygon(self.geometry.exterior_ring)
self.save()
### Spacing matrix models and methods ###
# This stuff is related to building a spacing matrix for a set of points
class SpacingPoint(models.Model):
"""
This model is used when generating a spacing matrix. Points contained here will be added to the array
of points that you are creating a spacing matrix for. In the MLPA process this is used to add the
points at the northern and southern extreme of the study region.
"""
name = models.CharField(max_length=200)
geometry = models.PointField(srid=settings.GEOMETRY_DB_SRID)
objects = models.GeoManager()
def __unicode__(self):
return unicode(self.name)
def all_spacing_points_dict():
"""
Returns a dictionary of the form: { point: 'name' } for all objects in SpacingPoint
"""
return dict( [ (p.geometry,p.name) for p in SpacingPoint.objects.all() ] )
def add_all_spacing_points(in_dict):
"""
Takes a dictionary of the form: { point: 'name' }, and adds all the objects in SpacingPoint
"""
in_dict.update(all_spacing_points_dict())
return in_dict
def distance_row_dict(from_dict, to_dict):
"""
from_dict will be a dict with a point as the key and a label as the value.
to_dict will be of the same format with multiple entries.
will return a dictionary with points as keys and a dictionary as values.
NOTE: This method assumes that the projection units are meters.
"""
from_pnt = from_dict.keys()[0]
for s_pnt in SpacingPoint.objects.all():
to_dict.update({s_pnt.geometry:s_pnt.name})
result = {}
for point, pnt_label in to_dict.iteritems():
result[point] = {
'label': pnt_label,
'distance': length_in_display_units(point.distance(from_pnt)),
'sort': point.y
}
return result
def distance_row_list(from_pnt, to_list, straight_line=False, with_geom=False):
"""
NOTE: This method assumes that the projection units are meters. This should be changed. Check out
lingcod.unit_converter.models. It's pretty easy to introspect the geometry for the srid and figure
out the native units from that. I'd fix it but this is my last week here before taking a big trip
on a small boat.
"""
result = []
for point in to_list:
point_pair_dict = {}
if straight_line:
point_pair_dict.update( {'distance': length_in_display_units(point.distance(from_pnt)) } )
if with_geom:
line = geos.LineString(point,from_pnt)
else:
distance, line = fish_distance_from_edges(from_pnt,point)
point_pair_dict.update( {'distance': distance} )
if with_geom:
point_pair_dict.update( {'geometry': line} )
result.append(point_pair_dict)
return result
def distance_matrix(point_list, straight_line=False, with_geom=False):
result = []
for point in point_list:
result.append(distance_row_list(point,point_list,straight_line=straight_line,with_geom=with_geom))
return result
def sorted_points_and_labels(in_dict):
"""
in_dict will look like:
{ point: 'name' }
sorted_points, sorted_labels (both lists) will be returned in a dictionary and they'll be
ordered from North to South.
I added in an if statement that makes this method work with other geometry types aside from
points. I should change the name of the method to make for sense but I'm going to put that
off until later.
"""
sorted_points = []
sorted_labels = []
y_dict = {}
for point, name in in_dict.iteritems():
# adapt this to work with other geometry types:
if point.__class__.__name__.lower() == 'point':
y_dict.update( { point.y: point } )
else:
y_dict.update( { point.centroid.y: point })
y_list = y_dict.keys()
y_list.sort()
for y in reversed(y_list):
sorted_points.append(y_dict[y])
sorted_labels.append(in_dict[y_dict[y]])
return { 'points': sorted_points, 'labels': sorted_labels }
def distance_matrix_and_labels(in_dict,add_spacing_points=True,straight_line=False,with_geom=False):
"""
in_dict will look like:
{ point: 'name' }
Will return a dictionary with the keys 'labels' and 'matrix'
"""
if add_spacing_points:
in_dict = add_all_spacing_points(in_dict)
spl_dict = sorted_points_and_labels(in_dict)
dist_mat = distance_matrix(spl_dict['points'], straight_line=straight_line, with_geom=with_geom)
return { 'labels': spl_dict['labels'], 'matrix': dist_mat }
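# Hedged usage sketch (not part of the original module): the spacing-matrix helpers
# above expect a {point: 'label'} mapping; the coordinates below are placeholders and
# add_spacing_points is turned off so the SpacingPoint table is not consulted.
def _example_spacing_matrix():
    pnts = {
        geos.Point(0, 0, srid=settings.GEOMETRY_DB_SRID): 'site A',
        geos.Point(0, 5000, srid=settings.GEOMETRY_DB_SRID): 'site B',
    }
    result = distance_matrix_and_labels(pnts, add_spacing_points=False,
                                        straight_line=True)
    return result['labels'], result['matrix']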
### End of spacing matrix methods ###
def add_points_to_graph(points,graph):
"""
points is a list of points. graph is a NetworkX graph.
"""
graph.add_nodes_from(points)
for pnt in points:
graph = add_ocean_edges_for_node(graph,get_node_from_point(graph, pnt))
return graph
def fish_distance(point1,point2):
"""
Returns the shortest distance around land (see the Land model) between the two points. Returns the distance in miles and
the geos linestring that represents the path.
NOTE: I'm assuming that the native units of the points and line is meters. This is true for the MLPA project but may
not be true for other processes.
"""
# This is the straight line between the two points
line = geos.LineString(point1,point2)
# See if the straight line crosses land
if line_crosses_land(line):
# The straight line cut across land so we have to do it the hard way.
G = PickledGraph.objects.all()[0].graph
G = add_points_to_graph([point1,point2],G)
# G.add_nodes_from([point1,point2])
# G = add_ocean_edges_for_node(G,get_node_from_point(G,point1))
# G = add_ocean_edges_for_node(G,get_node_from_point(G,point2))
# Replace the straight line with the shortest path around land
line = geos.LineString( nx.dijkstra_path(G,get_node_from_point(G,point1),get_node_from_point(G,point2)) )
line.srid = settings.GEOMETRY_DB_SRID
# Figure out the distance of the line (straight or otherwise) in miles
distance = length_in_display_units(line)
return distance, line
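# Hedged usage sketch (not part of the original module): fish_distance expects two GEOS
# points in the projection given by settings.GEOMETRY_DB_SRID; the coordinates below are
# placeholders, and the call only works once the Land and PickledGraph tables exist.
def _example_fish_distance():
    p1 = geos.Point(0, 0, srid=settings.GEOMETRY_DB_SRID)
    p2 = geos.Point(10000, 10000, srid=settings.GEOMETRY_DB_SRID)
    distance, path = fish_distance(p1, p2)
    return distance, path.kml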
def fish_distance_from_edges(geom1,geom2):
"""
"""
# Straight line between geoms
line = shortest_line(geom1,geom2)
# See if the line crosses land
if line_crosses_land(line):
# Get shortest centroid to centroid fish_distance line
c_distance, c_line = fish_distance(geom1.centroid,geom2.centroid)
# Replace the first point in the fish path with the point on geom1
# that lies closest to the second point on the path.
#print c_line[1]
c_line[0] = closest_point(geom1, geos.Point( c_line.coords[1], srid=geom1.srid ) ).coords
# Do the same for the last point in the path
c_line[c_line.num_points - 1] = closest_point(geom2, geos.Point( c_line.coords[c_line.num_points - 2], srid=geom2.srid ) ).coords
line = c_line
# Adjust the distance
distance = length_in_display_units(line)
return distance, line
def get_node_from_point(graph, point):
for node in graph.nodes_iter():
if node == point:
return node
def position_dictionary(graph):
"""
can be used by nx.draw to position nodes with matplotlib. This is not used in the general functioning of
the spacing app but is useful for visual testing. If you have a networkx graph G, then you can use it like:
nx.draw(G,pos=position_dictionary(G)) To see the result, use matplotlib.pyplot.show()
"""
pos = {}
for n in graph.nodes_iter():
pos[n] = (n.x, n.y)
return pos
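# As the docstring above suggests, a quick visual check could look like this (requires
# matplotlib; G is any networkx graph whose nodes are GEOS points):
#   import matplotlib.pyplot as plt
#   nx.draw(G, pos=position_dictionary(G))
#   plt.show()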
def add_land_to_graph(graph, hull_only=False, verbose=False):
if verbose:
print 'Adding land nodes to graph'
for l in Land.objects.iterator():
if hull_only:
graph = l.add_hull_nodes_to_graph(graph)
else:
graph = l.add_nodes_to_graph(graph)
graph = add_ocean_edges_complete(graph,verbose=verbose)
return graph
def points_from_graph(graph):
"""
Return a list of points from a graph.
"""
g_nodes = graph.nodes()
for point in g_nodes:
if point.srid == None:
point.srid = settings.GEOMETRY_DB_SRID
return g_nodes
def lines_from_graph(graph):
"""
Return a list of lines made from the edges of a graph.
"""
lines = []
for g_edge in graph.edges():
line = geos.LineString(g_edge[0],g_edge[1])
line.srid = settings.GEOMETRY_DB_SRID
lines.append(line)
return lines
def line_crosses_land(line):
land = Land.objects.all()
crosses = False
for l in land:
if line.intersects(l.geometry.buffer(-1)):
crosses = True
return crosses
def add_ocean_edges_for_node(graph, node):
for n in graph:
line = geos.LineString(node,n)
if not line_crosses_land(line):
graph.add_edge(node,n,{'weight': length_in_display_units(node.distance(n))})
return graph
def add_ocean_edges_complete(graph, verbose=False):
if verbose:
cnt = 1
import time
t0 = time.time()
print "Starting at %s to add edges for %i nodes." % (time.asctime(time.localtime(t0)), graph.number_of_nodes() )
edge_possibilities = graph.number_of_nodes() * (graph.number_of_nodes() -1)
print "We'll have to look at somewhere around %i edge possibilities." % ( edge_possibilities )
print "Node: ",
for node in graph.nodes_iter():
if verbose:
print str(cnt) + ' ',
cnt += 1
for n in graph.nodes_iter():
            if node != n:
line = geos.LineString(node,n)
if not line_crosses_land(line):
graph.add_edge(node,n,{'weight': length_in_display_units(node.distance(n))})
if verbose:
print "It took %i minutes to load %i edges." % ((time.time() - t0)/60, graph.number_of_edges() )
return graph
def shortest_line(geom1,geom2):
"""
Use the PostGIS function st_shortestline() to find the shortest line between two geometries. This requires
PostGIS 1.5 or newer. This will return a line geometry that represents the shortest line between the two
given geometries. Seems to work with any geometry type including geometry collections.
"""
cursor = connection.cursor()
query = "select st_astext( st_shortestline('%s'::geometry, '%s'::geometry) ) as sline;" % (geom1.wkt, geom2.wkt)
cursor.execute(query)
return geos.fromstr(cursor.fetchone()[0])
def closest_point(geom1,geom2):
"""
Use the PostGIS function ST_ClosestPoint() to return the 2-dimensional point on geom1 that is closest to geom2. This requires
PostGIS 1.5 or newer.
"""
cursor = connection.cursor()
query = "select st_asewkt( ST_ClosestPoint('%s'::geometry, '%s'::geometry) ) as sline;" % (geom1.ewkt, geom2.ewkt)
cursor.execute(query)
return geos.fromstr(cursor.fetchone()[0])
def clean_geometry(qs_item):
cursor = connection.cursor()
query = "update %s set geometry = cleangeometry(geometry) where %s = %i" % (qs_item._meta.db_table, qs_item._meta.pk.attname, qs_item.pk)
cursor.execute(query)
connection._commit()
def clean_query_set_geometries(qs):
for qs_item in qs:
if not qs_item.geometry.valid:
clean_geometry(qs_item)
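# Hedged usage sketch: the helpers above could be combined to sanitize all
# Land geometries before building the graph, e.g.
#
#   clean_query_set_geometries(Land.objects.all())
#
# Land is the model already used elsewhere in this module; the cleangeometry()
# SQL function must be installed in the PostGIS database for clean_geometry()
# to work.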
| bsd-3-clause |
robk5uj/invenio | modules/webstat/lib/webstat_engine.py | 3 | 92963 | ## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
__lastupdated__ = "$Date$"
import calendar, commands, datetime, time, os, cPickle, random, cgi
from operator import itemgetter
from invenio.config import CFG_TMPDIR, \
CFG_SITE_URL, \
CFG_SITE_NAME, \
CFG_BINDIR, \
CFG_CERN_SITE
from invenio.bibindex_engine import CFG_JOURNAL_TAG
from invenio.urlutils import redirect_to_url
from invenio.search_engine import perform_request_search, \
get_collection_reclist, \
get_most_popular_field_values
from invenio.search_engine_utils import get_fieldvalues
from invenio.dbquery import run_sql, \
wash_table_column_name
from invenio.websubmitadmin_dblayer import get_docid_docname_alldoctypes
from invenio.bibcirculation_utils import book_title_from_MARC, \
book_information_from_MARC
from invenio.bibcirculation_dblayer import get_id_bibrec, \
get_borrower_data
WEBSTAT_SESSION_LENGTH = 48 * 60 * 60 # seconds
WEBSTAT_GRAPH_TOKENS = '-=#+@$%&XOSKEHBC'
# KEY EVENT TREND SECTION
def get_keyevent_trend_collection_population(args, return_sql=False):
"""
Returns the quantity of documents in Invenio for
the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
if args.get('collection', 'All') == 'All':
sql_query_g = _get_sql_query("creation_date", args['granularity'],
"bibrec")
sql_query_i = "SELECT COUNT(id) FROM bibrec WHERE creation_date < %s"
initial_quantity = run_sql(sql_query_i, (lower, ))[0][0]
return _get_keyevent_trend(args, sql_query_g, initial_quantity=initial_quantity,
return_sql=return_sql, sql_text=
"Previous count: %s<br />Current count: %%s" % (sql_query_i),
acumulative=True)
else:
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
g = get_keyevent_trend_new_records(args, return_sql, True)
sql_query_i = "SELECT id FROM bibrec WHERE creation_date < %s"
if return_sql:
return "Previous count: %s<br />Current count: %s" % (sql_query_i % lower, g)
initial_quantity = len(filter(lambda x: x[0] in ids, run_sql(sql_query_i, (lower, ))))
return _get_trend_from_actions(g, initial_quantity, args['t_start'],
args['t_end'], args['granularity'], args['t_format'], acumulative=True)
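# Illustration only (values below are hypothetical, not taken from this
# module): the key-event trend functions expect an args dict shaped roughly
# like
#
#   args = {'collection': 'All',
#           't_start': '01/01/2011', 't_end': '02/01/2011',
#           't_format': '%d/%m/%Y', 'granularity': 'day'}
#
# where 't_format' must match the format of 't_start'/'t_end' and
# 'granularity' is one of the units understood by _get_sql_query().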
def get_keyevent_trend_new_records(args, return_sql=False, only_action=False):
"""
Returns the number of new records uploaded during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args.get('collection', 'All') == 'All':
return _get_keyevent_trend(args, _get_sql_query("creation_date", args['granularity'],
"bibrec"),
return_sql=return_sql)
else:
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
sql = _get_sql_query("creation_date", args["granularity"], "bibrec",
extra_select=", id", group_by=False, count=False)
if return_sql:
return sql % (lower, upper)
recs = run_sql(sql, (lower, upper))
if recs:
def add_count(i_list, element):
""" Reduce function to create a dictionary with the count of ids
for each date """
if i_list and element == i_list[-1][0]:
i_list[-1][1] += 1
else:
i_list.append([element, 1])
return i_list
action_dates = reduce(add_count,
map(lambda x: x[0], filter(lambda x: x[1] in ids, recs)),
[])
else:
action_dates = []
if only_action:
return action_dates
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
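# For illustration (not part of the original code): the reduce over add_count
# above run-length-encodes an already date-ordered list, e.g.
#   reduce(add_count, ['2011-01-01', '2011-01-01', '2011-01-02'], [])
# yields [['2011-01-01', 2], ['2011-01-02', 1]], i.e. one (date, count) pair
# per consecutive group of identical dates.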
def get_keyevent_trend_search_frequency(args, return_sql=False):
"""
Returns the number of searches (of any kind) carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
return _get_keyevent_trend(args, _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query"),
return_sql=return_sql)
def get_keyevent_trend_comments_frequency(args, return_sql=False):
"""
Returns the number of comments (of any kind) carried out
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args.get('collection', 'All') == 'All':
sql = _get_sql_query("date_creation", args["granularity"],
"cmtRECORDCOMMENT")
else:
sql = _get_sql_query("date_creation", args["granularity"],
"cmtRECORDCOMMENT", conditions=
_get_collection_recids_for_sql_query(args['collection']))
return _get_keyevent_trend(args, sql, return_sql=return_sql)
def get_keyevent_trend_search_type_distribution(args, return_sql=False):
"""
Returns the number of searches carried out during the given
timestamp range, but also partitions them by type: Simple and
Advanced.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# SQL to determine all simple searches:
simple = _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query",
conditions="urlargs LIKE '%%p=%%'")
# SQL to determine all advanced searches:
advanced = _get_sql_query("date", args["granularity"],
"query INNER JOIN user_query ON id=id_query",
conditions="urlargs LIKE '%%as=1%%'")
# Compute the trend for both types
s_trend = _get_keyevent_trend(args, simple,
return_sql=return_sql, sql_text="Simple: %s")
a_trend = _get_keyevent_trend(args, advanced,
return_sql=return_sql, sql_text="Advanced: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (s_trend, a_trend)
return [(s_trend[i][0], (s_trend[i][1], a_trend[i][1]))
for i in range(len(s_trend))]
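# Shape note: the list returned above pairs each trend step with a
# (simple, advanced) tuple of counts; the *_percentage trend functions further
# down reuse the same pattern for (overdue, total) and (satisfied, total).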
def get_keyevent_trend_download_frequency(args, return_sql=False):
"""
Returns the number of full text downloads carried out
during the given timestamp range.
@param args['collection']: A collection name
@type args['collection']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# Collect list of timestamps of insertion in the specific collection
if args.get('collection', 'All') == 'All':
return _get_keyevent_trend(args, _get_sql_query("download_time",
args["granularity"], "rnkDOWNLOADS"), return_sql=return_sql)
else:
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
ids = get_collection_reclist(args['collection'])
if len(ids) == 0:
return []
sql = _get_sql_query("download_time", args["granularity"], "rnkDOWNLOADS",
extra_select=", GROUP_CONCAT(id_bibrec)")
if return_sql:
return sql % (lower, upper)
action_dates = []
for result in run_sql(sql, (lower, upper)):
count = result[1]
for id in result[2].split(","):
if id == '' or not int(id) in ids:
count -= 1
action_dates.append((result[0], count))
return _get_trend_from_actions(action_dates, 0, args['t_start'],
args['t_end'], args['granularity'], args['t_format'])
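# Caveat (editorial note): GROUP_CONCAT(id_bibrec) above is subject to MySQL's
# group_concat_max_len limit (1024 bytes by default), so buckets with very
# many downloads may have their id list silently truncated, which can make the
# per-collection correction below miscount.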
def get_keyevent_trend_number_of_loans(args, return_sql=False):
"""
Returns the number of loans carried out
during the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
return _get_keyevent_trend(args, _get_sql_query("loaned_on",
args["granularity"], "crcLOAN"), return_sql=return_sql)
def get_keyevent_trend_web_submissions(args, return_sql=False):
"""
Returns the quantity of web submissions in Invenio for
the given timestamp range.
@param args['doctype']: A doctype name
@type args['doctype']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
if args['doctype'] == 'all':
sql = _get_sql_query("cd", args["granularity"], "sbmSUBMISSIONS",
conditions="action='SBI' AND status='finished'")
res = _get_keyevent_trend(args, sql, return_sql=return_sql)
else:
sql = _get_sql_query("cd", args["granularity"], "sbmSUBMISSIONS",
conditions="doctype=%s AND action='SBI' AND status='finished'")
res = _get_keyevent_trend(args, sql, extra_param=[args['doctype']],
return_sql=return_sql)
return res
def get_keyevent_loan_statistics(args, return_sql=False):
"""
Data:
- Number of documents (=records) loaned
- Number of items loaned on the total number of items
- Number of items never loaned on the total number of items
- Average time between the date of the record creation and the date of the first loan
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by item status (available, missing)
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l "
sql_where = "WHERE loaned_on > %s AND loaned_on < %s "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND l." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND l.barcode = i.barcode AND i.status = %s "
param.append(args['item_status'])
if 'publication_date' in args and args['publication_date'] != '':
sql_where += "AND l.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE %s)"
param.append('%%%s%%' % args['publication_date'])
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br "
sql_where += "AND br.id=l.id_bibrec AND br.creation_date LIKE %s "
param.append('%%%s%%' % args['creation_date'])
param = tuple(param)
# Number of loans:
loans_sql = "SELECT COUNT(DISTINCT l.id_bibrec) " + sql_from + sql_where
items_loaned_sql = "SELECT COUNT(DISTINCT l.barcode) " + sql_from + sql_where
# Only the CERN site wants the items of the collection "Books & Proceedings"
if CFG_CERN_SITE:
items_in_book_coll = _get_collection_recids_for_sql_query("Books & Proceedings")
if items_in_book_coll == "":
total_items_sql = 0
else:
total_items_sql = "SELECT COUNT(*) FROM crcITEM WHERE %s" % \
items_in_book_coll
else: # The rest take all the items
total_items_sql = "SELECT COUNT(*) FROM crcITEM"
# Average time between the date of the record creation and the date of the first loan
avg_sql = "SELECT AVG(DATEDIFF(loaned_on, br.creation_date)) " + sql_from
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += ", bibrec br "
avg_sql += sql_where
if not ('creation_date' in args and args['creation_date'] != ''):
avg_sql += "AND br.id=l.id_bibrec "
if return_sql:
return "<ol><li>%s</li><li>Items loaned * 100 / Number of items <ul><li>\
Items loaned: %s </li><li>Number of items: %s</li></ul></li><li>100 - Items \
loaned on total number of items</li><li>%s</li></ol>" % \
(loans_sql % param, items_loaned_sql % param, total_items_sql, avg_sql % param)
loans = run_sql(loans_sql, param)[0][0]
items_loaned = run_sql(items_loaned_sql, param)[0][0]
if total_items_sql:
total_items = run_sql(total_items_sql)[0][0]
else:
total_items = 0
if total_items == 0:
loaned_on_total = 0
never_loaned_on_total = 0
else:
# Number of items loaned on the total number of items:
loaned_on_total = float(items_loaned) * 100 / float(total_items)
# Number of items never loaned on the total number of items:
never_loaned_on_total = 100L - loaned_on_total
avg = run_sql(avg_sql, param)[0][0]
if avg:
avg = float(avg)
else:
avg = 0L
return ((loans, ), (loaned_on_total, ), (never_loaned_on_total, ), (avg, ))
def get_keyevent_loan_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of documents (= records) never loaned
- List of most loaned documents (columns: number of loans,
number of copies and the creation date of the record, in
order to calculate the number of loans by copy), sorted
in decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by loan period (4 week loan, one week loan...)
- by a certain number of loans
- by date of publication (MARC field 260__c)
- by date of the record creation in the database
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['loan_period']: 4 week loan, one week loan...
@type args['loan_period']: str
@param args['min_loan']: minimum number of loans
@type args['min_loan']: int
@param args['max_loan']: maximum number of loans
@type args['max_loan']: int
@param args['publication_date']: MARC field 260__c
@type args['publication_date']: str
@param args['creation_date']: date of the record creation in the database
@type args['creation_date']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_where = []
param = []
sql_from = ""
if 'udc' in args and args['udc'] != '':
sql_where.append("i." + _check_udc_value_where())
param.append(_get_udc_truncated(args['udc']))
if 'loan_period' in args and args['loan_period'] != '':
sql_where.append("loan_period = %s")
param.append(args['loan_period'])
if 'publication_date' in args and args['publication_date'] != '':
sql_where.append("i.id_bibrec IN ( SELECT brb.id_bibrec \
FROM bibrec_bib26x brb, bib26x b WHERE brb.id_bibxxx = b.id AND tag='260__c' \
AND value LIKE %s)")
param.append('%%%s%%' % args['publication_date'])
if 'creation_date' in args and args['creation_date'] != '':
sql_from += ", bibrec br"
sql_where.append("br.id=i.id_bibrec AND br.creation_date LIKE %s")
param.append('%%%s%%' % args['creation_date'])
if sql_where:
sql_where = "WHERE %s AND" % " AND ".join(sql_where)
else:
sql_where = "WHERE"
param = tuple(param + [lower, upper])
# SQL for both queries
check_num_loans = "HAVING "
if 'min_loans' in args and args['min_loans'] != '':
check_num_loans += "COUNT(*) >= %s" % args['min_loans']
if 'max_loans' in args and args['max_loans'] != '' and args['max_loans'] != 0:
if check_num_loans != "HAVING ":
check_num_loans += " AND "
check_num_loans += "COUNT(*) <= %s" % args['max_loans']
# Optimized to get all the data in only one query (not call get_fieldvalues several times)
mldocs_sql = "SELECT i.id_bibrec, COUNT(*) \
FROM crcLOAN l, crcITEM i%s %s l.barcode=i.barcode AND type = 'normal' AND \
loaned_on > %%s AND loaned_on < %%s GROUP BY i.id_bibrec %s" % \
(sql_from, sql_where, check_num_loans)
limit_n = ""
if limit > 0:
limit_n = "LIMIT %d" % limit
nldocs_sql = "SELECT id_bibrec, COUNT(*) FROM crcITEM i%s %s \
barcode NOT IN (SELECT id_bibrec FROM crcLOAN WHERE loaned_on > %%s AND \
loaned_on < %%s AND type = 'normal') GROUP BY id_bibrec ORDER BY COUNT(*) DESC %s" % \
(sql_from, sql_where, limit_n)
items_sql = "SELECT id_bibrec, COUNT(*) items FROM crcITEM GROUP BY id_bibrec"
creation_date_sql = "SELECT creation_date FROM bibrec WHERE id=%s"
authors_sql = "SELECT bx.value FROM bib10x bx, bibrec_bib10x bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE '100__a' AND bibx.id_bibrec=%s"
title_sql = "SELECT GROUP_CONCAT(bx.value SEPARATOR ' ') value FROM bib24x bx, bibrec_bib24x bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE %s AND bibx.id_bibrec=%s GROUP BY bibx.id_bibrec"
edition_sql = "SELECT bx.value FROM bib25x bx, bibrec_bib25x AS bibx \
WHERE bx.id = bibx.id_bibxxx AND bx.tag LIKE '250__a' AND bibx.id_bibrec=%s"
if return_sql:
return "Most loaned: %s<br \>Never loaned: %s" % \
(mldocs_sql % param, nldocs_sql % param)
mldocs = run_sql(mldocs_sql, param)
items = dict(run_sql(items_sql))
order_m = []
for mldoc in mldocs:
order_m.append([mldoc[0], mldoc[1], items[mldoc[0]], \
float(mldoc[1]) / float(items[mldoc[0]])])
order_m = sorted(order_m, key=itemgetter(3))
order_m.reverse()
# Check limit values
if limit > 0:
order_m = order_m[:limit]
res = [("", "Title", "Author", "Edition", "Number of loans",
"Number of copies", "Date of creation of the record")]
for mldoc in order_m:
res.append(("Most loaned documents",
_check_empty_value(run_sql(title_sql, ('245__%%', mldoc[0], ))),
_check_empty_value(run_sql(authors_sql, (mldoc[0], ))),
_check_empty_value(run_sql(edition_sql, (mldoc[0], ))),
mldoc[1], mldoc[2],
_check_empty_value(run_sql(creation_date_sql, (mldoc[0], )))))
nldocs = run_sql(nldocs_sql, param)
for nldoc in nldocs:
res.append(("Not loaned documents",
_check_empty_value(run_sql(title_sql, ('245__%%', nldoc[0], ))),
_check_empty_value(run_sql(authors_sql, (nldoc[0], ))),
_check_empty_value(run_sql(edition_sql, (nldoc[0], ))),
0, items[nldoc[0]],
_check_empty_value(run_sql(creation_date_sql, (nldoc[0], )))))
# nldocs = run_sql(nldocs_sql, param_n)
return (res)
def get_keyevent_renewals_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of most renewed items, sorted in decreasing order (50 items)
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
- by collection
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['collection']: collection of the record
@type args['collection']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOAN l, crcITEM i "
sql_where = "WHERE loaned_on > %s AND loaned_on < %s AND i.barcode = l.barcode "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND l." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
filter_coll = False
if 'collection' in args and args['collection'] != '':
filter_coll = True
recid_list = get_collection_reclist(args['collection'])
param = tuple(param)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT i.id_bibrec, SUM(number_of_renewals) %s %s \
GROUP BY i.id_bibrec ORDER BY SUM(number_of_renewals) DESC %s" \
% (sql_from, sql_where, limit)
if return_sql:
return sql % param
# Results:
res = [("Title", "Author", "Edition", "Number of renewals")]
for rec, renewals in run_sql(sql, param):
if filter_coll and rec not in recid_list:
continue
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, int(renewals)))
return (res)
def get_keyevent_returns_table(args, return_sql=False):
"""
Data:
- Number of overdue returns in a timespan
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Overdue returns:
sql = "SELECT COUNT(*) FROM crcLOAN l WHERE loaned_on > %s AND loaned_on < %s AND \
due_date < NOW() AND (returned_on IS NULL OR returned_on > due_date)"
if return_sql:
return sql % (lower, upper)
return ((run_sql(sql, (lower, upper))[0][0], ), )
def get_keyevent_trend_returns_percentage(args, return_sql=False):
"""
Returns the number of overdue returns and the total number of returns
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# SQL to determine overdue returns:
overdue = _get_sql_query("due_date", args["granularity"], "crcLOAN",
conditions="due_date < NOW() AND due_date IS NOT NULL \
AND (returned_on IS NULL OR returned_on > due_date)",
dates_range_param="loaned_on")
# SQL to determine all returns:
total = _get_sql_query("due_date", args["granularity"], "crcLOAN",
conditions="due_date < NOW() AND due_date IS NOT NULL",
dates_range_param="loaned_on")
# Compute the trend for both types
o_trend = _get_keyevent_trend(args, overdue,
return_sql=return_sql, sql_text="Overdue: %s")
t_trend = _get_keyevent_trend(args, total,
return_sql=return_sql, sql_text="Total: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (o_trend, t_trend)
return [(o_trend[i][0], (o_trend[i][1], t_trend[i][1]))
for i in range(len(o_trend))]
def get_keyevent_ill_requests_statistics(args, return_sql=False):
"""
Data:
- Number of ILL requests
- Number of ILL requests satisfied within 2 weeks of the request
  creation date, over a timespan
- Average time between the date and hour of the ILL request and
  the date and hour of delivery of the item to the user, over a
  timespan
- Average time between the date and hour the ILL request was sent
  to the supplier and the date and hour of delivery of the item,
  over a timespan
Filter by
- in a specified time span
- by type of document (book or article)
- by status of the request (= new, sent, etc.)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE period_of_interest_from > %s AND period_of_interest_from < %s "
param = [lower, upper]
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
else:
sql_where += "AND ill.status != %s "
param.append("cancelled") #FIXME: change to CFG variable
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
param = tuple(param)
requests_sql = "SELECT COUNT(*) %s %s" % (sql_from, sql_where)
satrequests_sql = "SELECT COUNT(*) %s %s \
AND arrival_date IS NOT NULL AND \
DATEDIFF(arrival_date, period_of_interest_from) < 14 " % (sql_from, sql_where)
avgdel_sql = "SELECT AVG(TIMESTAMPDIFF(DAY, period_of_interest_from, arrival_date)) %s %s \
AND arrival_date IS NOT NULL" % (sql_from, sql_where)
avgsup_sql = "SELECT AVG(TIMESTAMPDIFF(DAY, request_date, arrival_date)) %s %s \
AND arrival_date IS NOT NULL \
AND request_date IS NOT NULL" % (sql_from, sql_where)
if return_sql:
return "<ol><li>%s</li><li>%s</li><li>%s</li><li>%s</li></ol>" % \
(requests_sql % param, satrequests_sql % param,
avgdel_sql % param, avgsup_sql % param)
# Number of requests:
requests = run_sql(requests_sql, param)[0][0]
# Number of satisfied ILL requests 2 weeks after the date of request creation:
satrequests = run_sql(satrequests_sql, param)[0][0]
# Average time between the date and the hour of the ill request date and
# the date and the hour of the delivery item to the user
avgdel = run_sql(avgdel_sql, param)[0][0]
if avgdel:
avgdel = float(avgdel)
else:
avgdel = 0
# Average time between the date and the hour the ILL request was sent to
# the supplier and the date and hour of the delivery item
avgsup = run_sql(avgsup_sql, param)[0][0]
if avgsup:
avgsup = float(avgsup)
else:
avgsup = 0
return ((requests, ), (satrequests, ), (avgdel, ), (avgsup, ))
def get_keyevent_ill_requests_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of ILL requests
Filter by
- in a specified time span
- by type of request (article or book)
- by supplier
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of request (article or book)
@type args['doctype']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcILLREQUEST ill "
sql_where = "WHERE status != 'cancelled' AND request_date > %s AND request_date < %s " #FIXME: change 'cancelled' to CFG variable
param = [lower, upper]
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s "
param.append(args['doctype'])
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
param = tuple(param)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT ill.id, item_info %s %s %s" % (sql_from, sql_where, limit)
if return_sql:
return sql % param
# Results:
res = [("Id", "Title", "Author", "Edition")]
for req_id, item_info in run_sql(sql, param):
item_info = eval(item_info)
try:
res.append((req_id, item_info['title'], item_info['authors'], item_info['edition']))
except KeyError:
pass
return (res)
def get_keyevent_trend_satisfied_ill_requests_percentage(args, return_sql=False):
"""
Returns the number of satisfied ILL requests 2 weeks after the date of request
creation and the total number of ILL requests
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['doctype']: type of document (book or article)
@type args['doctype']: str
@param args['status']: status of the request (= new, sent, etc.)
@type args['status']: str
@param args['supplier']: supplier
@type args['supplier']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
sql_from = "crcILLREQUEST ill "
sql_where = ""
param = []
if 'doctype' in args and args['doctype'] != '':
sql_where += "AND ill.request_type=%s"
param.append(args['doctype'])
if 'status' in args and args['status'] != '':
sql_where += "AND ill.status = %s "
param.append(args['status'])
else:
sql_where += "AND ill.status != %s "
param.append("cancelled") #FIXME: change to CFG variable
if 'supplier' in args and args['supplier'] != '':
sql_from += ", crcLIBRARY lib "
sql_where += "AND lib.id=ill.id_crcLIBRARY AND lib.name=%s "
param.append(args['supplier'])
# SQL to determine satisfied ILL requests:
satisfied = _get_sql_query("request_date", args["granularity"], sql_from,
conditions="ADDDATE(request_date, 14) < NOW() AND \
(arrival_date IS NULL OR arrival_date < ADDDATE(request_date, 14)) " + sql_where)
# SQL to determine all ILL requests:
total = _get_sql_query("request_date", args["granularity"], sql_from,
conditions="ADDDATE(request_date, 14) < NOW() "+ sql_where)
# Compute the trend for both types
s_trend = _get_keyevent_trend(args, satisfied, extra_param=param,
return_sql=return_sql, sql_text="Satisfied: %s")
t_trend = _get_keyevent_trend(args, total, extra_param=param,
return_sql=return_sql, sql_text="Total: %s")
# Assemble, according to return type
if return_sql:
return "%s <br /> %s" % (s_trend, t_trend)
return [(s_trend[i][0], (s_trend[i][1], t_trend[i][1]))
for i in range(len(s_trend))]
def get_keyevent_items_statistics(args, return_sql=False):
"""
Data:
- The total number of items
- Total number of new items added in last year
Filter by
- in a specified time span
- by collection
- by UDC (see MARC field 080__a - list to be submitted)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'udc' in args and args['udc'] != '':
sql_where += "i." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
# Number of items:
if sql_where == "WHERE ":
sql_where = ""
items_sql = "SELECT COUNT(i.id_bibrec) %s %s" % (sql_from, sql_where)
# Number of new items:
if sql_where == "":
sql_where = "WHERE creation_date > %s AND creation_date < %s "
else:
sql_where += " AND creation_date > %s AND creation_date < %s "
new_items_sql = "SELECT COUNT(i.id_bibrec) %s %s" % (sql_from, sql_where)
if return_sql:
return "Total: %s <br />New: %s" % (items_sql % tuple(param), new_items_sql % tuple(param + [lower, upper]))
return ((run_sql(items_sql, tuple(param))[0][0], ), (run_sql(new_items_sql, tuple(param + [lower, upper]))[0][0], ))
def get_keyevent_items_lists(args, return_sql=False, limit=50):
"""
Lists:
- The list of items
Filter by
- by library (=physical location of the item)
- by status (=on loan, available, requested, missing...)
@param args['library']: physical location of the item
@type args['library']: str
@param args['status']: on loan, available, requested, missing...
@type args['status']: str
"""
sql_from = "FROM crcITEM i "
sql_where = "WHERE "
param = []
if 'library' in args and args['library'] != '':
sql_from += ", crcLIBRARY li "
sql_where += "li.id=i.id_crcLIBRARY AND li.name=%s "
param.append(args['library'])
if 'status' in args and args['status'] != '':
if sql_where != "WHERE ":
sql_where += "AND "
sql_where += "i.status = %s "
param.append(args['status'])
param = tuple(param)
# Results:
res = [("Title", "Author", "Edition", "Barcode", "Publication date")]
if sql_where == "WHERE ":
sql_where = ""
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT i.barcode, i.id_bibrec %s %s %s" % (sql_from, sql_where, limit)
if len(param) == 0:
sqlres = run_sql(sql)
else:
sqlres = run_sql(sql, tuple(param))
sql = sql % param
if return_sql:
return sql
for barcode, rec in sqlres:
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec),
author, edition, barcode,
book_information_from_MARC(int(rec))[1]))
return (res)
def get_keyevent_loan_request_statistics(args, return_sql=False):
"""
Data:
- Number of hold requests, one week after the date of request creation
- Number of successful hold request transactions
- Average time between the hold request date and the date of delivery of the document, in a year
Filter by
- in a specified time span
- by item status (available, missing)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['item_status']: available, missing...
@type args['item_status']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'item_status' in args and args['item_status'] != '':
sql_from += ", crcITEM i "
sql_where += "AND lr.barcode = i.barcode AND i.status = %s "
param.append(args['item_status'])
param = tuple(param)
custom_table = get_customevent_table("loanrequest")
# Number of hold requests, one week after the date of request creation:
holds = "SELECT COUNT(*) %s, %s ws %s AND ws.request_id=lr.id AND \
DATEDIFF(ws.creation_time, lr.request_date) >= 7" % (sql_from, custom_table, sql_where)
# Number of successful hold requests transactions
succesful_holds = "SELECT COUNT(*) %s %s AND lr.status='done'" % (sql_from, sql_where)
# Average time between the hold request date and the date of delivery document in a year
avg_sql = "SELECT AVG(DATEDIFF(ws.creation_time, lr.request_date)) \
%s, %s ws %s AND ws.request_id=lr.id" % (sql_from, custom_table, sql_where)
if return_sql:
return "<ol><li>%s</li><li>%s</li><li>%s</li></ol>" % \
(holds % param, succesful_holds % param, avg_sql % param)
avg = run_sql(avg_sql, param)[0][0]
if avg:
avg = int(avg)
else:
avg = 0
return ((run_sql(holds, param)[0][0], ),
(run_sql(succesful_holds, param)[0][0], ), (avg, ))
def get_keyevent_loan_request_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of the most requested items
Filter by
- in a specified time span
- by UDC (see MARC field 080__a - list to be submitted)
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['udc']: MARC field 080__a
@type args['udc']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from = "FROM crcLOANREQUEST lr "
sql_where = "WHERE request_date > %s AND request_date < %s "
param = [lower, upper]
if 'udc' in args and args['udc'] != '':
sql_where += "AND lr." + _check_udc_value_where()
param.append(_get_udc_truncated(args['udc']))
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT lr.barcode %s %s GROUP BY barcode \
ORDER BY COUNT(*) DESC %s" % (sql_from, sql_where, limit)
if return_sql:
return sql
res = [("Title", "Author", "Edition", "Barcode")]
# Most requested items:
for barcode in run_sql(sql, param):
rec = get_id_bibrec(barcode[0])
author = get_fieldvalues(rec, "100__a")
if len(author) > 0:
author = author[0]
else:
author = ""
edition = get_fieldvalues(rec, "250__a")
if len(edition) > 0:
edition = edition[0]
else:
edition = ""
res.append((book_title_from_MARC(rec), author, edition, barcode[0]))
return (res)
def get_keyevent_user_statistics(args, return_sql=False):
"""
Data:
- Total number of active users (to be defined = at least one transaction in the past year)
Filter by
- in a specified time span
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
sql_from_ill = "FROM crcILLREQUEST ill "
sql_from_loan = "FROM crcLOAN l "
sql_where_ill = "WHERE request_date > %s AND request_date < %s "
sql_where_loan = "WHERE loaned_on > %s AND loaned_on < %s "
param = (lower, upper, lower, upper)
# Total number of active users:
users = "SELECT COUNT(DISTINCT user) FROM ((SELECT id_crcBORROWER user %s %s) \
UNION (SELECT id_crcBORROWER user %s %s)) res" % \
(sql_from_ill, sql_where_ill, sql_from_loan, sql_where_loan)
if return_sql:
return users % param
return ((run_sql(users, param)[0][0], ), )
def get_keyevent_user_lists(args, return_sql=False, limit=50):
"""
Lists:
- List of most intensive users (ILL requests + Loan)
Filter by
- in a specified time span
- by registration date
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
param = (lower, upper, lower, upper)
if limit > 0:
limit = "LIMIT %d" % limit
else:
limit = ""
sql = "SELECT user, SUM(trans) FROM \
((SELECT id_crcBORROWER user, COUNT(*) trans FROM crcILLREQUEST ill \
WHERE request_date > %%s AND request_date < %%s GROUP BY id_crcBORROWER) UNION \
(SELECT id_crcBORROWER user, COUNT(*) trans FROM crcLOAN l WHERE loaned_on > %%s AND \
loaned_on < %%s GROUP BY id_crcBORROWER)) res GROUP BY user ORDER BY SUM(trans) DESC \
%s" % (limit)
if return_sql:
return sql % param
res = [("Name", "Address", "Mailbox", "E-mail", "Number of transactions")]
# List of most intensive users (ILL requests + Loan):
for borrower_id, trans in run_sql(sql, param):
name, address, mailbox, email = get_borrower_data(borrower_id)
res.append((name, address, mailbox, email, int(trans)))
return (res)
# KEY EVENT SNAPSHOT SECTION
def get_keyevent_snapshot_uptime_cmd():
"""
A specific implementation of get_current_event().
@return: The std-out from the UNIX command 'uptime'.
@type: str
"""
return _run_cmd('uptime').strip().replace(' ', ' ')
def get_keyevent_snapshot_apache_processes():
"""
A specific implementation of get_current_event().
@return: The number of Apache processes (root+children), as output by 'ps | wc -l'.
@type: str
"""
# The number of Apache processes (root+children)
return _run_cmd('ps -e | grep apache2 | grep -v grep | wc -l')
def get_keyevent_snapshot_bibsched_status():
"""
A specific implementation of get_current_event().
@return: Information about the number of tasks in the different status modes.
@type: [(str, int)]
"""
sql = "SELECT status, COUNT(status) FROM schTASK GROUP BY status"
return [(x[0], int(x[1])) for x in run_sql(sql)]
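# Example of the return shape (status names and counts are hypothetical):
#   [('WAITING', 2), ('RUNNING', 1), ('DONE', 40)]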
def get_keyevent_snapshot_sessions():
"""
A specific implementation of get_current_event().
@return: The current number of website visitors (guests, logged in)
@type: (int, int)
"""
# SQL to retrieve the sessions of guests
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email = '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
guests = run_sql(sql)[0][0]
# SQL to retrieve the sessions of logged-in users
sql = "SELECT COUNT(session_expiry) " + \
"FROM session INNER JOIN user ON uid=id " + \
"WHERE email <> '' AND " + \
"session_expiry-%d < unix_timestamp() AND " \
% WEBSTAT_SESSION_LENGTH + \
"unix_timestamp() < session_expiry"
logged_ins = run_sql(sql)[0][0]
# Assemble, according to return type
return (guests, logged_ins)
def get_keyevent_bibcirculation_report(freq='yearly'):
"""
Monthly and yearly report with the total number of circulation
transactions (loans, renewals, returns, ILL requests, hold requests).
@param freq: yearly or monthly
@type freq: str
@return: loans, renewals, returns, ILL requests, hold requests
@type: (int, int, int, int, int)
"""
if freq == 'monthly':
datefrom = datetime.date.today().strftime("%Y-%m-01 00:00:00")
else: #yearly
datefrom = datetime.date.today().strftime("%Y-01-01 00:00:00")
loans, renewals = run_sql("SELECT COUNT(*), \
SUM(number_of_renewals) \
FROM crcLOAN WHERE loaned_on > %s", (datefrom, ))[0]
returns = run_sql("SELECT COUNT(*) FROM crcLOAN \
WHERE returned_on!='0000-00-00 00:00:00' and loaned_on > %s", (datefrom, ))[0][0]
illrequests = run_sql("SELECT COUNT(*) FROM crcILLREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
holdrequest = run_sql("SELECT COUNT(*) FROM crcLOANREQUEST WHERE request_date > %s",
(datefrom, ))[0][0]
return (loans, renewals, returns, illrequests, holdrequest)
# ERROR LOG STATS
def update_error_log_analyzer():
"""Creates splitted files for today's errors"""
_run_cmd('bash %s/webstat -e -is' % CFG_BINDIR)
def get_invenio_error_log_ranking():
""" Returns the ranking of the errors in the invenio log"""
return _run_cmd('bash %s/webstat -e -ir' % CFG_BINDIR)
def get_invenio_last_n_errors(nerr):
"""Returns the last nerr errors in the invenio log (without details)"""
return _run_cmd('bash %s/webstat -e -il %d' % (CFG_BINDIR, nerr))
def get_invenio_error_details(error):
"""Returns the complete text of the invenio error."""
out = _run_cmd('bash %s/webstat -e -id %s' % (CFG_BINDIR, error))
return out
def get_apache_error_log_ranking():
""" Returns the ranking of the errors in the apache log"""
return _run_cmd('bash %s/webstat -e -ar' % CFG_BINDIR)
# CUSTOM EVENT SECTION
def get_customevent_trend(args):
"""
Returns trend data for a custom event over a given
timestamp range.
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
@param args['cols']: Columns and their content to be included;
    if missing or empty, all columns are included
@type args['cols']: [ [ str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
tbl_name = get_customevent_table(args['event_id'])
col_names = get_customevent_args(args['event_id'])
where = []
sql_param = [lower, upper]
for col_bool, col_title, col_content in args['cols']:
if not col_title in col_names:
continue
if col_content:
if col_bool == "" or not where:
where.append(wash_table_column_name(col_title))
elif col_bool == "and":
where.append("AND %s"
% wash_table_column_name(col_title))
elif col_bool == "or":
where.append("OR %s"
% wash_table_column_name(col_title))
elif col_bool == "and_not":
where.append("AND NOT %s"
% wash_table_column_name(col_title))
else:
continue
where.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql = _get_sql_query("creation_time", args['granularity'], tbl_name, " ".join(where))
return _get_trend_from_actions(run_sql(sql, tuple(sql_param)), 0,
args['t_start'], args['t_end'],
args['granularity'], args['t_format'])
def get_customevent_dump(args):
"""
Similar to a get_event_trend implementation, but NO refining aka frequency
handling is carried out whatsoever. This is just a dump. A dump!
@param args['event_id']: The event id
@type args['event_id']: str
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
@param args['cols']: Columns and their content to be included;
    if missing or empty, all columns are included
@type args['cols']: [ [ str, str ], ]
"""
# Get a MySQL friendly date
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
# Get customevents
# events_list = [(creation_time, event, [arg1, arg2, ...]), ...]
event_list = []
event_cols = {}
for event_id, i in [(args['ids'][i], str(i))
for i in range(len(args['ids']))]:
# Get all the event arguments and creation times
tbl_name = get_customevent_table(event_id)
col_names = get_customevent_args(event_id)
sql_query = ["SELECT * FROM %s WHERE creation_time > '%%s'" % wash_table_column_name(tbl_name), (lower,)] # kwalitee: disable=sql
sql_query.append("AND creation_time < '%s'" % upper)
sql_param = []
for col_bool, col_title, col_content in args['cols' + i]:
if not col_title in col_names:
continue
if col_content:
if col_bool == "and" or col_bool == "":
sql_query.append("AND %s" % \
wash_table_column_name(col_title))
elif col_bool == "or":
sql_query.append("OR %s" % \
wash_table_column_name(col_title))
elif col_bool == "and_not":
sql_query.append("AND NOT %s" % \
wash_table_column_name(col_title))
else:
continue
sql_query.append(" LIKE %s")
sql_param.append("%" + col_content + "%")
sql_query.append("ORDER BY creation_time DESC")
sql = ' '.join(sql_query)
res = run_sql(sql, tuple(sql_param))
for row in res:
event_list.append((row[1], event_id, row[2:]))
# Get the event col names
try:
event_cols[event_id] = cPickle.loads(run_sql(
"SELECT cols FROM staEVENT WHERE id = %s",
(event_id, ))[0][0])
except TypeError:
event_cols[event_id] = ["Unnamed"]
event_list.sort()
output = []
for row in event_list:
temp = [row[1], row[0].strftime('%Y-%m-%d %H:%M:%S')]
arguments = ["%s: %s" % (event_cols[row[1]][i],
row[2][i]) for i in range(len(row[2]))]
temp.extend(arguments)
output.append(tuple(temp))
return output
def get_customevent_table(event_id):
"""
Helper function that, for a certain event id, retrieves the corresponding
event table name.
"""
res = run_sql(
"SELECT CONCAT('staEVENT', number) FROM staEVENT WHERE id = %s", (event_id, ))
try:
return res[0][0]
except IndexError:
# No such event table
return None
def get_customevent_args(event_id):
"""
Helper function that, for a certain event id, retrieves the corresponding
event argument (column) names.
"""
res = run_sql("SELECT cols FROM staEVENT WHERE id = %s", (event_id, ))
try:
if res[0][0]:
return cPickle.loads(res[0][0])
else:
return []
except IndexError:
# No such event table
return None
# CUSTOM SUMMARY SECTION
def get_custom_summary_data(query, tag):
"""Returns the annual report data for the specified year
@param query: Search query to make customized report
@type query: str
@param tag: MARC tag for the output
@type tag: str
"""
# Check arguments
if tag == '':
tag = CFG_JOURNAL_TAG.replace("%", "p")
# First get records of the year
recids = perform_request_search(p=query, of="id")
# Then return list by tag
pub = list(get_most_popular_field_values(recids, tag))
if len(pub) == 0:
return []
if CFG_CERN_SITE:
total = sum([x[1] for x in pub])
else:
others = 0
total = 0
first_other = -1
for elem in pub:
total += elem[1]
if elem[1] < 2:
if first_other == -1:
first_other = pub.index(elem)
others += elem[1]
del pub[first_other:]
if others != 0:
pub.append(('Others', others))
pub.append(('TOTAL', total))
return pub
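# Hypothetical example of the returned list (journal names and counts are
# invented for illustration):
#   [('Phys. Rev. D', 120), ('JHEP', 45), ('Others', 7), ('TOTAL', 172)]
# On non-CERN sites, values occurring fewer than 2 times may be folded into an
# 'Others' row, and a 'TOTAL' row is appended.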
def create_custom_summary_graph(data, path, title):
"""
Creates a pie chart with the information from the custom summary and
saves it in the file specified by the path argument
"""
# If no input, we don't bother about anything
if len(data) == 0:
return
os.environ['HOME'] = CFG_TMPDIR
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
except ImportError:
return
# make a square figure and axes
matplotlib.rcParams['font.size'] = 8
labels = [x[0] for x in data]
numb_elem = len(labels)
width = 6 + float(numb_elem) / 7
gfile = plt.figure(1, figsize=(width, 6))
plt.axes([0.1, 0.1, 4.2 / width, 0.7])
numb = [x[1] for x in data]
total = sum(numb)
fracs = [x * 100 / total for x in numb]
colors = []
random.seed()
for i in range(numb_elem):
col = 0.5 + float(i) / (float(numb_elem) * 2.0)
rand = random.random() / 2.0
if i % 3 == 0:
red = col
green = col + rand
blue = col - rand
if green > 1.0:
green = 1
elif i % 3 == 1:
red = col - rand
green = col
blue = col + rand
if blue > 1.0:
blue = 1
elif i % 3 == 2:
red = col + rand
green = col - rand
blue = col
if red > 1.0:
red = 1
colors.append((red, green, blue))
patches = plt.pie(fracs, colors=tuple(colors), labels=labels,
autopct='%1i%%', pctdistance=0.8, shadow=True)[0]
ttext = plt.title(title)
plt.setp(ttext, size='xx-large', color='b', family='monospace', weight='extra bold')
legend_keywords = {"prop": {"size": "small"}}
plt.figlegend(patches, labels, 'lower right', **legend_keywords)
plt.savefig(path)
plt.close(gfile)
# GRAPHER
def create_graph_trend(trend, path, settings):
"""
Creates a graph representation out of data produced from get_event_trend.
@param trend: The trend data
@type trend: [(str, str|int|(str|int,...))]
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of graph parameters
@type settings: dict
"""
# If no input, we don't bother about anything
if not trend or len(trend) == 0:
return
# If no filename is given, we'll assume STD-out format and ASCII.
if path == '':
settings["format"] = 'asciiart'
if settings["format"] == 'asciiart':
create_graph_trend_ascii_art(trend, path, settings)
elif settings["format"] == 'gnuplot':
create_graph_trend_gnu_plot(trend, path, settings)
elif settings["format"] == "flot":
create_graph_trend_flot(trend, path, settings)
def create_graph_trend_ascii_art(trend, path, settings):
"""Creates the graph trend using ASCII art"""
out = ""
if settings["multiple"] is not None:
# Tokens that will represent the different data sets (maximum 16 sets)
# Set index (=100) to the biggest of the histogram sums
index = max([sum(x[1]) for x in trend])
# Print legend box
out += "Legend: %s\n\n" % ", ".join(["%s (%s)" % x
for x in zip(settings["multiple"], WEBSTAT_GRAPH_TOKENS)])
else:
index = max([x[1] for x in trend])
width = 82
# Figure out the max length of the xtics, in order to left align
xtic_max_len = max([len(_to_datetime(x[0]).strftime(
settings["xtic_format"])) for x in trend])
for row in trend:
# Print the xtic
xtic = _to_datetime(row[0]).strftime(settings["xtic_format"])
out_row = xtic + ': ' + ' ' * (xtic_max_len - len(xtic)) + '|'
try:
col_width = (1.0 * width / index)
except ZeroDivisionError:
col_width = 0
if settings["multiple"] is not None:
# The second value of the row-tuple, represents the n values from
# the n data sets. Each set, will be represented by a different
# ASCII character, chosen from the randomized string
# 'WEBSTAT_GRAPH_TOKENS'.
# NOTE: Only up to 16 (len(WEBSTAT_GRAPH_TOKENS)) data
# sets are supported.
total = sum(row[1])
for i in range(len(row[1])):
col = row[1][i]
try:
out_row += WEBSTAT_GRAPH_TOKENS[i] * int(1.0 * col * col_width)
except ZeroDivisionError:
break
if len([i for i in row[1] if type(i) is int and i > 0]) - 1 > 0:
out_row += out_row[-1]
else:
total = row[1]
try:
out_row += '-' * int(1.0 * total * col_width)
except ZeroDivisionError:
break
# Print sentinel, and the total
out += out_row + '>' + ' ' * (xtic_max_len + 4 +
width - len(out_row)) + str(total) + '\n'
# Write to destination file
if path == '':
print out
else:
open(path, 'w').write(out)
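# Rough sketch of the ASCII output produced above (data and xtic format are
# entirely hypothetical):
#
#   2011-01: |--------------------->        12
#   2011-02: |------------->                 8
#
# Each row is an xtic, a bar scaled against the largest value, a '>' sentinel
# and the total for that step.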
def create_graph_trend_gnu_plot(trend, path, settings):
"""Creates the graph trend using the GNU plot library"""
try:
import Gnuplot
except ImportError:
return
gnup = Gnuplot.Gnuplot()
gnup('set style data steps')
if 'size' in settings:
gnup('set terminal png tiny size %s' % settings['size'])
else:
gnup('set terminal png tiny')
gnup('set output "%s"' % path)
if settings["title"] != '':
gnup.title(settings["title"].replace("\"", ""))
if settings["xlabel"] != '':
gnup.xlabel(settings["xlabel"])
if settings["ylabel"] != '':
gnup.ylabel(settings["ylabel"])
if settings["xtic_format"] != '':
xtics = 'set xtics ('
xtics += ', '.join(['"%s" %d' %
(_to_datetime(trend[i][0], '%Y-%m-%d \
%H:%M:%S').strftime(settings["xtic_format"]), i)
for i in range(len(trend))]) + ')'
gnup(xtics)
gnup('set format y "%.0f"')
# If we have multiple data sets, we need to do
# some magic to make Gnuplot eat it,
# This is basically a matrix transposition,
# and the addition of index numbers.
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
plot_items = []
y_max = 0
y_min = 0
for col in range(cols):
data = []
for row in range(rows):
data.append([row, trend[row][1][col]])
data.append([rows, trend[-1][1][col]])
plot_items.append(Gnuplot.PlotItems
.Data(data, title=settings["multiple"][col]))
tmp_max = max([x[col] for x in data])
tmp_min = min([x[col] for x in data])
if tmp_max > y_max:
y_max = tmp_max
if tmp_min < y_min:
y_min = tmp_min
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(*plot_items)
else:
data = [x[1] for x in trend]
data.append(trend[-1][1])
y_max = max(data)
y_min = min(data)
if y_max - y_min < 5 and y_min != 0:
gnup('set ytic %d, 1, %d' % (y_min - 1, y_max + 2))
elif y_max < 5:
gnup('set ytic 1')
gnup.plot(data)
def create_graph_trend_flot(trend, path, settings):
"""Creates the graph trend using the flot library"""
size = settings.get("size", "500,400").split(",")
title = cgi.escape(settings["title"].replace(" ", "")[:10])
out = """<!--[if IE]><script language="javascript" type="text/javascript"
src="%(site)s/js/excanvas.min.js"></script><![endif]-->
<script language="javascript" type="text/javascript" src="%(site)s/js/jquery.flot.min.js"></script>
<script language="javascript" type="text/javascript" src="%(site)s/js/jquery.flot.selection.min.js"></script>
<script id="source" language="javascript" type="text/javascript">
document.write('<div style="float:left"><div id="placeholder%(title)s" style="width:%(width)spx;height:%(height)spx"></div></div>'+
'<div id="miniature%(title)s" style="float:left;margin-left:20px;margin-top:50px">' +
'<div id="overview%(title)s" style="width:%(hwidth)dpx;height:%(hheigth)dpx"></div>' +
'<p id="overviewLegend%(title)s" style="margin-left:10px"></p>' +
'</div>');
$(function () {
function parseDate%(title)s(sdate){
var div1 = sdate.split(' ');
var day = div1[0].split('-');
var hour = div1[1].split(':');
return new Date(day[0], day[1]-1, day[2], hour[0], hour[1], hour[2]).getTime() - (new Date().getTimezoneOffset() * 60 * 1000) ;
}
function getData%(title)s() {""" % \
{'site': CFG_SITE_URL, 'width': size[0], 'height': size[1], 'hwidth': int(size[0]) / 2,
'hheigth': int(size[1]) / 2, 'title': title}
if(len(trend) > 1):
granularity_td = (_to_datetime(trend[1][0], '%Y-%m-%d %H:%M:%S') -
_to_datetime(trend[0][0], '%Y-%m-%d %H:%M:%S'))
else:
granularity_td = datetime.timedelta()
# Create variables with the format dn = [[x1,y1], [x2,y2]]
minx = trend[0][0]
maxx = trend[0][0]
if settings["multiple"] is not None:
cols = len(trend[0][1])
rows = len(trend)
first = 0
for col in range(cols):
out += """var d%d = [""" % (col)
for row in range(rows):
if(first == 0):
first = 1
else:
out += ", "
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
out += '[parseDate%s("%s"),%d]' % \
(title, _to_datetime(trend[row][0], '%Y-%m-%d \
%H:%M:%S'), trend[row][1][col])
out += ", [parseDate%s('%s'), %d]];\n" % (title,
_to_datetime(maxx, '%Y-%m-%d %H:%M:%S')+ granularity_td,
trend[-1][1][col])
out += "return [\n"
first = 0
for col in range(cols):
if first == 0:
first = 1
else:
out += ", "
out += '{data : d%d, label : "%s"}' % \
(col, settings["multiple"][col])
out += "];\n}\n"
else:
out += """var d1 = ["""
rows = len(trend)
first = 0
for row in range(rows):
if trend[row][0] < minx:
minx = trend[row][0]
if trend[row][0] > maxx:
maxx = trend[row][0]
if first == 0:
first = 1
else:
out += ', '
out += '[parseDate%s("%s"),%d]' % \
(title, _to_datetime(trend[row][0], '%Y-%m-%d %H:%M:%S'),
trend[row][1])
out += """, [parseDate%s("%s"), %d]];
return [d1];
}
""" % (title, _to_datetime(maxx, '%Y-%m-%d %H:%M:%S') +
granularity_td, trend[-1][1])
# Set options
tics = """yaxis: {
tickDecimals : 0
},"""
if settings["xtic_format"] != '':
current = _to_datetime(maxx, '%Y-%m-%d %H:%M:%S')
next = current + granularity_td
if (granularity_td.seconds + granularity_td.days * 24 * 3600) > 2592000:
next = current.replace(day=31)
tics += 'xaxis: { mode:"time",min:parseDate%s("%s"),max:parseDate%s("%s")},'\
% (title, _to_datetime(minx, '%Y-%m-%d %H:%M:%S'), title, next)
out += """var options%s ={
series: {
lines: { steps: true, fill: true},
points: { show: false }
},
legend: {show: false},
%s
grid: { hoverable: true, clickable: true },
selection: { mode: "xy" }
};
""" % (title, tics, )
# Write the plot method in javascript
out += """var startData%(title)s = getData%(title)s();
var plot%(title)s = $.plot($("#placeholder%(title)s"), startData%(title)s, options%(title)s);
// setup overview
var overview%(title)s = $.plot($("#overview%(title)s"), startData%(title)s, {
legend: { show: true, container: $("#overviewLegend%(title)s") },
series: {
lines: { steps: true, fill: true, lineWidth: 1},
shadowSize: 0
},
%(tics)s
grid: { color: "#999" },
selection: { mode: "xy" }
});
""" % {"title": title, "tics": tics}
# Tooltip and zoom
out += """
function showTooltip%(title)s(x, y, contents) {
$('<div id="tooltip%(title)s">' + contents + '</div>').css( {
position: 'absolute',
display: 'none',
top: y - 5,
left: x + 10,
border: '1px solid #fdd',
padding: '2px',
'background-color': '#fee',
opacity: 0.80
}).appendTo("body").fadeIn(200);
}
var previousPoint%(title)s = null;
$("#placeholder%(title)s").bind("plothover", function (event, pos, item) {
if (item) {
if (previousPoint%(title)s != item.datapoint) {
previousPoint%(title)s = item.datapoint;
$("#tooltip%(title)s").remove();
var y = item.datapoint[1];
showTooltip%(title)s(item.pageX, item.pageY, y);
}
}
else {
$("#tooltip%(title)s").remove();
previousPoint%(title)s = null;
}
});
$("#placeholder%(title)s").bind("plotclick", function (event, pos, item) {
if (item) {
plot%(title)s.highlight(item.series, item.datapoint);
}
});
// now connect the two
$("#placeholder%(title)s").bind("plotselected", function (event, ranges) {
// clamp the zooming to prevent eternal zoom
if (ranges.xaxis.to - ranges.xaxis.from < 0.00001){
ranges.xaxis.to = ranges.xaxis.from + 0.00001;}
if (ranges.yaxis.to - ranges.yaxis.from < 0.00001){
ranges.yaxis.to = ranges.yaxis.from + 0.00001;}
// do the zooming
plot%(title)s = $.plot($("#placeholder%(title)s"), getData%(title)s(ranges.xaxis.from, ranges.xaxis.to),
$.extend(true, {}, options%(title)s, {
xaxis: { min: ranges.xaxis.from, max: ranges.xaxis.to },
yaxis: { min: ranges.yaxis.from, max: ranges.yaxis.to }
}));
// don't fire event on the overview to prevent eternal loop
overview%(title)s.setSelection(ranges, true);
});
$("#overview%(title)s").bind("plotselected", function (event, ranges) {
plot%(title)s.setSelection(ranges);
});
});
</script>
<noscript>Your browser does not support JavaScript!
Please, select another output format</noscript>""" % {'title' : title}
open(path, 'w').write(out)
def get_numeric_stats(data, multiple):
""" Returns average, max and min values for data """
data = [x[1] for x in data]
if data == []:
return (0, 0, 0)
if multiple:
lists = []
for i in range(len(data[0])):
lists.append([x[i] for x in data])
return ([float(sum(x)) / len(x) for x in lists], [max(x) for x in lists],
[min(x) for x in lists])
else:
return (float(sum(data)) / len(data), max(data), min(data))
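# Example (hypothetical values):
#   >>> get_numeric_stats([('2011-01-01', 2), ('2011-01-02', 4)], multiple=False)
#   (3.0, 4, 2)
#   >>> get_numeric_stats([('d1', [1, 10]), ('d2', [3, 20])], multiple=True)
#   ([2.0, 15.0], [3, 20], [1, 10])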
def create_graph_table(data, path, settings):
"""
Creates a html table representation out of data.
@param data: The data
@type data: (str,...)
@param path: Where to store the graph
@type path: str
@param settings: Dictionary of table parameters
@type settings: dict
"""
out = """<table border="1">
"""
if settings['rows'] == []:
for row in data:
out += """<tr>
"""
for value in row:
out += """<td>%s</td>
""" % value
out += "</tr>"
else:
for dta, value in zip(settings['rows'], data):
out += """<tr>
<td>%s</td>
<td>
""" % dta
for vrow in value:
out += """%s<br />
""" % vrow
out = out[:-6] + "</td></tr>"
out += "</table>"
open(path, 'w').write(out)
def create_graph_dump(dump, path):
"""
Creates a graph representation out of data produced from get_event_trend.
@param dump: The dump data
@type dump: [(str|int,...)]
@param path: Where to store the graph
@type path: str
"""
out = ""
if len(dump) == 0:
out += "No actions for this custom event " + \
"are registered in the given time range."
else:
# Make every row in dump equally long, insert None if appropriate.
max_len = max([len(x) for x in dump])
events = [tuple(list(x) + [None] * (max_len - len(x))) for x in dump]
cols = ["Event", "Date and time"] + ["Argument %d" % i
for i in range(max_len - 2)]
column_widths = [max([len(str(x[i])) \
for x in events + [cols]]) + 3 for i in range(len(events[0]))]
for i in range(len(cols)):
out += cols[i] + ' ' * (column_widths[i] - len(cols[i]))
out += "\n"
for i in range(len(cols)):
out += '=' * (len(cols[i])) + ' ' * (column_widths[i] - len(cols[i]))
out += "\n\n"
for action in dump:
for i in range(len(action)):
if action[i] is None:
temp = ''
else:
temp = action[i]
out += str(temp) + ' ' * (column_widths[i] - len(str(temp)))
out += "\n"
# Write to destination file
if path == '':
print out
else:
open(path, 'w').write(out)
# EXPORT DATA TO SLS
def get_search_frequency(day=datetime.datetime.now().date()):
"""Returns the number of searches performed in the chosen day"""
searches = get_keyevent_trend_search_type_distribution(get_args(day))
return sum(searches[0][1])
def get_total_records(day=datetime.datetime.now().date()):
"""Returns the total number of records which existed in the chosen day"""
tomorrow = (datetime.datetime.now() +
datetime.timedelta(days=1)).strftime("%Y-%m-%d")
args = {'collection': CFG_SITE_NAME, 't_start': day.strftime("%Y-%m-%d"),
't_end': tomorrow, 'granularity': "day", 't_format': "%Y-%m-%d"}
try:
return get_keyevent_trend_collection_population(args)[0][1]
except IndexError:
return 0
def get_new_records(day=datetime.datetime.now().date()):
"""Returns the number of new records submitted in the chosen day"""
args = {'collection': CFG_SITE_NAME,
't_start': (day - datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
't_end': day.strftime("%Y-%m-%d"), 'granularity': "day",
't_format': "%Y-%m-%d"}
try:
return (get_total_records(day) -
get_keyevent_trend_collection_population(args)[0][1])
except IndexError:
return 0
def get_download_frequency(day=datetime.datetime.now().date()):
"""Returns the number of downloads during the chosen day"""
return get_keyevent_trend_download_frequency(get_args(day))[0][1]
def get_comments_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_comments_frequency(get_args(day))[0][1]
def get_loans_frequency(day=datetime.datetime.now().date()):
"""Returns the number of comments during the chosen day"""
return get_keyevent_trend_number_of_loans(get_args(day))[0][1]
def get_web_submissions(day=datetime.datetime.now().date()):
"""Returns the number of web submissions during the chosen day"""
args = get_args(day)
args['doctype'] = 'all'
return get_keyevent_trend_web_submissions(args)[0][1]
def get_alerts(day=datetime.datetime.now().date()):
"""Returns the number of alerts during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'alerts'
return get_customevent_trend(args)[0][1]
def get_journal_views(day=datetime.datetime.now().date()):
"""Returns the number of journal displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'journals'
return get_customevent_trend(args)[0][1]
def get_basket_views(day=datetime.datetime.now().date()):
"""Returns the number of basket displays during the chosen day"""
args = get_args(day)
args['cols'] = [('', '', '')]
args['event_id'] = 'baskets'
return get_customevent_trend(args)[0][1]
def get_args(day):
"""Returns the most common arguments for the exporting to SLS methods"""
return {'t_start': day.strftime("%Y-%m-%d"),
't_end': (day + datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
'granularity': "day", 't_format': "%Y-%m-%d"}
# EXPORTER
def export_to_python(data, req):
"""
Exports the data to Python code.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
_export("text/x-python", str(data), req)
def export_to_csv(data, req):
"""
Exports the data to CSV.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
csv_list = [""""%s",%s""" % (x[0], ",".join([str(y) for y in \
((type(x[1]) is tuple) and x[1] or (x[1], ))])) for x in data]
_export('text/csv', '\n'.join(csv_list), req)
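# Example of the CSV shape produced above (hypothetical data):
#   data = [('2011-01-01', (1, 2)), ('2011-01-02', 5)]
#   produces the lines:
#     "2011-01-01",1,2
#     "2011-01-02",5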
def export_to_file(data, req):
"""
Exports the data to a file.
@param data: The Python data that should be exported
@type data: []
@param req: The Apache request object
@type req:
"""
try:
import xlwt
book = xlwt.Workbook(encoding="utf-8")
sheet1 = book.add_sheet('Sheet 1')
for row in range(0, len(data)):
for col in range(0, len(data[row])):
sheet1.write(row, col, "%s" % data[row][col])
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '') + '.xls'
book.save(filename)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), 'application/vnd.ms-excel'))
except ImportError:
csv_list = []
for row in data:
row = ['"%s"' % str(col) for col in row]
csv_list.append(",".join(row))
_export('text/csv', '\n'.join(csv_list), req)
# INTERNAL
def _export(mime, content, req):
"""
Helper function to pass on the export call. Create a
temporary file in which the content is stored, then let
redirect to the export web interface.
"""
filename = CFG_TMPDIR + "/webstat_export_" + \
str(time.time()).replace('.', '')
open(filename, 'w').write(content)
redirect_to_url(req, '%s/stats/export?filename=%s&mime=%s' \
% (CFG_SITE_URL, os.path.basename(filename), mime))
def _get_trend_from_actions(action_dates, initial_value,
t_start, t_end, granularity, dt_format, acumulative=False):
"""
Given a list of dates reflecting some sort of action/event, and some additional parameters,
    an internal data format is returned. 'initial_value' set to zero means that the frequency
will not be accumulative, but rather non-causal.
@param action_dates: A list of dates, indicating some sort of action/event.
@type action_dates: [datetime.datetime]
@param initial_value: The numerical offset the first action's value should make use of.
@type initial_value: int
@param t_start: Start time for the time domain in dt_format
@type t_start: str
@param t_end: End time for the time domain in dt_format
@type t_end: str
@param granularity: The granularity of the time domain, span between values.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
    @param dt_format: Format of the 't_start' and 't_end' parameters
@type dt_format: str
@return: A list of tuples zipping a time-domain and a value-domain
@type: [(str, int)]
"""
    # Work on a local, mutable copy of the action dates
action_dates = list(action_dates)
# Construct the datetime tuple for the stop time
stop_at = _to_datetime(t_end, dt_format) - datetime.timedelta(seconds=1)
vector = [(None, initial_value)]
try:
upcoming_action = action_dates.pop()
#Do not count null values (when year, month or day is 0)
if granularity in ("year", "month", "day") and upcoming_action[0] == 0:
upcoming_action = action_dates.pop()
except IndexError:
upcoming_action = (datetime.datetime.max, 0)
# Create an iterator running from the first day of activity
for current in _get_datetime_iter(t_start, granularity, dt_format):
# Counter of action_dates in the current span, set the initial value to
        # zero to avoid accumulation.
if acumulative:
actions_here = vector[-1][1]
else:
actions_here = 0
# Check to see if there's an action date in the current span
if upcoming_action[0] == {"year": current.year,
"month": current.month,
"day": current.day,
"hour": current.hour,
"minute": current.minute,
"second": current.second
}[granularity]:
actions_here += upcoming_action[1]
try:
upcoming_action = action_dates.pop()
except IndexError:
upcoming_action = (datetime.datetime.max, 0)
vector.append((current.strftime('%Y-%m-%d %H:%M:%S'), actions_here))
# Make sure to stop the iteration at the end time
if {"year": current.year >= stop_at.year,
"month": current.month >= stop_at.month and current.year == stop_at.year,
"day": current.day >= stop_at.day and current.month == stop_at.month,
"hour": current.hour >= stop_at.hour and current.day == stop_at.day,
"minute": current.minute >= stop_at.minute and current.hour == stop_at.hour,
"second": current.second >= stop_at.second and current.minute == stop_at.minute
}[granularity]:
break
# Remove the first bogus tuple, and return
return vector[1:]
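# The returned vector looks like (hypothetical values):
#   [('2011-01-01 00:00:00', 3), ('2011-01-02 00:00:00', 0), ...]
# i.e. one (timestamp string, count) tuple per granularity step between
# t_start and t_end.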
def _get_keyevent_trend(args, sql, initial_quantity=0, extra_param=[],
return_sql=False, sql_text='%s', acumulative=False):
"""
Returns the trend for the sql passed in the given timestamp range.
@param args['t_start']: Date and time of start point
@type args['t_start']: str
@param args['t_end']: Date and time of end point
@type args['t_end']: str
@param args['granularity']: Granularity of date and time
@type args['granularity']: str
@param args['t_format']: Date and time formatting string
@type args['t_format']: str
"""
# collect action dates
lower = _to_datetime(args['t_start'], args['t_format']).isoformat()
upper = _to_datetime(args['t_end'], args['t_format']).isoformat()
param = tuple([lower, upper] + extra_param)
if return_sql:
sql = sql % param
return sql_text % sql
return _get_trend_from_actions(run_sql(sql, param), initial_quantity, args['t_start'],
args['t_end'], args['granularity'], args['t_format'], acumulative)
def _get_datetime_iter(t_start, granularity='day',
dt_format='%Y-%m-%d %H:%M:%S'):
"""
Returns an iterator over datetime elements starting at an arbitrary time,
with granularity of a [year,month,day,hour,minute,second].
@param t_start: An arbitrary starting time in format %Y-%m-%d %H:%M:%S
@type t_start: str
@param granularity: The span between iterable elements, default is 'days'.
Possible values are [year,month,day,hour,minute,second].
@type granularity: str
@param dt_format: Format of the 't_start' parameter
@type dt_format: str
@return: An iterator of points in time
@type: iterator over datetime elements
"""
tim = _to_datetime(t_start, dt_format)
# Make a time increment depending on the granularity and the current time
# (the length of years and months vary over time)
span = ""
while True:
yield tim
if granularity == "year":
span = (calendar.isleap(tim.year) and ["days=366"] or ["days=365"])[0]
elif granularity == "month":
span = "days=" + str(calendar.monthrange(tim.year, tim.month)[1])
elif granularity == "day":
span = "days=1"
elif granularity == "hour":
span = "hours=1"
elif granularity == "minute":
span = "minutes=1"
elif granularity == "second":
span = "seconds=1"
else:
# Default just in case
span = "days=1"
tim += eval("datetime.timedelta(" + span + ")")
def _to_datetime(dttime, dt_format='%Y-%m-%d %H:%M:%S'):
"""
Transforms a string into a datetime
"""
return datetime.datetime(*time.strptime(dttime, dt_format)[:6])
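# Example:
#   >>> _to_datetime('2011-03-01 12:00:00')
#   datetime.datetime(2011, 3, 1, 12, 0)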
def _run_cmd(command):
"""
Runs a certain command and returns the string output. If the command is
    not found, a string saying so will be returned. Use with caution!
@param command: The UNIX command to execute.
@type command: str
@return: The std-out from the command.
@type: str
"""
return commands.getoutput(command)
def _get_doctypes():
"""Returns all the possible doctypes of a new submission"""
doctypes = [("all", "All")]
for doctype in get_docid_docname_alldoctypes():
doctypes.append(doctype)
return doctypes
def _get_item_statuses():
"""Returns all the possible status of an item"""
return [("available", "Available"), ("requested", "Requested"),
("on loan", "On loan"), ("missing", "Missing")]
def _get_item_doctype():
"""Returns all the possible types of document for an item"""
dts = []
for dat in run_sql("""SELECT DISTINCT(request_type)
FROM crcILLREQUEST ORDER BY request_type ASC"""):
dts.append((dat[0], dat[0]))
return dts
def _get_request_statuses():
"""Returns all the possible statuses for an ILL request"""
dts = []
for dat in run_sql("SELECT DISTINCT(status) FROM crcILLREQUEST ORDER BY status ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_libraries():
"""Returns all the possible libraries"""
dts = []
for dat in run_sql("SELECT name FROM crcLIBRARY ORDER BY name ASC"):
if not CFG_CERN_SITE or not "CERN" in dat[0]: # do not add internal libraries for CERN site
dts.append((dat[0], dat[0]))
return dts
def _get_loan_periods():
"""Returns all the possible loan periods for an item"""
dts = []
for dat in run_sql("SELECT DISTINCT(loan_period) FROM crcITEM ORDER BY loan_period ASC"):
dts.append((dat[0], dat[0]))
return dts
def _get_tag_name(tag):
"""
For a specific MARC tag, it returns the human-readable name
"""
res = run_sql("SELECT name FROM tag WHERE value LIKE %s", ('%' + tag + '%',))
if res:
return res[0][0]
res = run_sql("SELECT name FROM tag WHERE value LIKE %s", ('%' + tag[:-1] + '%',))
if res:
return res[0][0]
return ''
def _get_collection_recids_for_sql_query(coll):
ids = get_collection_reclist(coll).tolist()
if len(ids) == 0:
return ""
return "id_bibrec IN %s" % str(ids).replace('[', '(').replace(']', ')')
def _check_udc_value_where():
return "id_bibrec IN (SELECT brb.id_bibrec \
FROM bibrec_bib08x brb, bib08x b WHERE brb.id_bibxxx = b.id AND tag='080__a' \
AND value LIKE %s) "
def _get_udc_truncated(udc):
if udc[-1] == '*':
return "%s%%" % udc[:-1]
if udc[0] == '*':
return "%%%s" % udc[1:]
return "%s" % udc
def _check_empty_value(value):
if len(value) == 0:
return ""
else:
return value[0][0]
def _get_granularity_sql_functions(granularity):
try:
return {
"year": ("YEAR",),
"month": ("YEAR", "MONTH",),
"day": ("MONTH", "DAY",),
"hour": ("DAY", "HOUR",),
"minute": ("HOUR", "MINUTE",),
"second": ("MINUTE", "SECOND")
}[granularity]
except KeyError:
return ("MONTH", "DAY",)
def _get_sql_query(creation_time_name, granularity, tables_from, conditions="",
extra_select="", dates_range_param="", group_by=True, count=True):
if len(dates_range_param) == 0:
dates_range_param = creation_time_name
conditions = "%s > %%s AND %s < %%s %s" % (dates_range_param, dates_range_param,
len(conditions) > 0 and "AND %s" % conditions or "")
values = {'creation_time_name': creation_time_name,
'granularity_sql_function': _get_granularity_sql_functions(granularity)[-1],
'count': count and ", COUNT(*)" or "",
'tables_from': tables_from,
'conditions': conditions,
'extra_select': extra_select,
'group_by': ""}
if group_by:
values['group_by'] = "GROUP BY "
for fun in _get_granularity_sql_functions(granularity):
values['group_by'] += "%s(%s), " % (fun, creation_time_name)
values['group_by'] = values['group_by'][:-2]
return "SELECT %(granularity_sql_function)s(%(creation_time_name)s) %(count)s %(extra_select)s \
FROM %(tables_from)s WHERE %(conditions)s \
%(group_by)s \
ORDER BY %(creation_time_name)s DESC" % values
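# For example (hypothetical table/column names), a call such as
#   _get_sql_query('creation_date', 'day', 'bibrec')
# builds roughly:
#   SELECT DAY(creation_date), COUNT(*) FROM bibrec
#   WHERE creation_date > %s AND creation_date < %s
#   GROUP BY MONTH(creation_date), DAY(creation_date)
#   ORDER BY creation_date DESC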
| gpl-2.0 |
kristianeschenburg/parcellearning | parcellearning/mlp/train.py | 1 | 7885 | import argparse, json, os, time
import mlp
from parcellearning.utilities import gnnio
from parcellearning.utilities.early_stop import EarlyStopping
from parcellearning.utilities.batch import partition_graphs
from parcellearning.utilities.load import load_schema, load_model
from shutil import copyfile
from pathlib import Path
import numpy as np
import pandas as pd
import dgl
from dgl.data import register_data_args
import dgl.function as fn
import torch
import torch.nn.functional as F
def main(args):
schema = load_schema(args.schema_file)
out_dir = schema['data']['out']
Path(out_dir).mkdir(parents=True, exist_ok=True)
# copy schema file to output directory
copy_schema = ''.join([out_dir, args.schema_file.split('/')[-1]])
if not os.path.exists(copy_schema):
copyfile(args.schema_file, copy_schema)
##### GET PARAMETERS FROM SCHEMA FILE #####
# - - - - - - - - - - - - - - - - - - - - #
# - - - - - - - - - - - - - - - - - - - - #
MODEL_PARAMS = schema['model_parameters']
OPT_PARAMS = schema['optimizer_parameters']
TRAIN_PARAMS = schema['training_parameters']
STOP_PARAMS = schema['stopping_parameters']
DATA_PARAMS = schema['variable_parameters']
# - - - - - - - - - - - - - - - - - - - - #
# - - - - - - - - - - - - - - - - - - - - #
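    # The schema JSON is assumed to look roughly as follows (keys inferred
    # from the lookups above; values are hypothetical):
    #   {
    #     "model": "mlp",
    #     "data": {"training": "...", "validation": "...", "out": "..."},
    #     "model_parameters": {...},
    #     "optimizer_parameters": {...},
    #     "training_parameters": {"epochs": 100, "n_batch": 10},
    #     "stopping_parameters": {...},
    #     "variable_parameters": {"features": ["..."], "response": "..."}
    #   }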
features = DATA_PARAMS['features']
features.sort()
# load training and validation data
training = gnnio.dataset(features=features,
dSet=schema['data']['training'],
atlas=DATA_PARAMS['response'],
norm=True,
clean=True)
validation = gnnio.dataset(features=features,
dSet=schema['data']['validation'],
atlas=DATA_PARAMS['response'],
norm=True,
clean=True)
validation = dgl.batch(validation)
val_X = validation.ndata['features']
val_Y = validation.ndata['label']
##### MODEL INITIALIZATION #####
# - - - - - - - - - - - - #
# - - - - - - - - - - - - #
# instantiate model using schema parameters
if args.existing:
model_parameters = '%s%s.earlystop.Loss.pt' % (schema['data']['out'], schema['model'])
model = load_model(schema, model_parameters)
model_progress = '%sperformance.%s.json' % (schema['data']['out'], schema['model'])
with open(model_progress, 'r') as f:
progress = json.load(f)
else:
print('Training new model')
model = mlp.MLP(**MODEL_PARAMS)
progress = {k: [] for k in ['Epoch',
'Duration',
'Train Loss',
'Train Acc',
'Val Loss',
'Val Acc']}
print(model)
# instantiate Adam optimizer using scheme parameters
optimizer = torch.optim.Adam(model.parameters(), **OPT_PARAMS)
# initialize early stopper
    stopped_model_output = '%s%s.earlystop.Loss.pt' % (out_dir, schema['model'])
stopper = EarlyStopping(filename=stopped_model_output, **STOP_PARAMS)
cross_entropy = torch.nn.CrossEntropyLoss()
dur = []
##### MODEL TRAINING #####
# - - - - - - - - - - - - #
# - - - - - - - - - - - - #
starting_epoch = len(progress['Epoch'])
print('\nTraining model\n')
for epoch in range(starting_epoch, TRAIN_PARAMS['epochs']):
# learn model on training data
batches = partition_graphs(training, TRAIN_PARAMS['n_batch'])
model.train()
t0 = time.time()
# zero the gradients for this epoch
optimizer.zero_grad()
# aggregate training batch losses
train_loss = 0
# aggregate training batch accuracies
train_acc = 0
for iteration, batch in enumerate(batches):
# get training features for this batch
batch_X = batch.ndata['features']
batch_Y = batch.ndata['label']
# push batch through network
batch_logits = model(batch_X)
batch_loss = cross_entropy(batch_logits, batch_Y)
batch_softmax = F.softmax(batch_logits, dim=1)
_, batch_indices = torch.max(batch_softmax, dim=1)
batch_acc = (batch_indices == batch_Y).sum() / batch_Y.shape[0]
# apply backward parameter update pass
batch_loss.backward()
print('Batch: %i | Batch Acc: %.3f | Batch Loss: %.3f ' % (iteration+1, batch_acc.item(), batch_loss.item()))
# update training performance
train_loss += batch_loss
train_acc += batch_acc
# accumulate the gradients from each batch
if (iteration+1) % TRAIN_PARAMS['n_batch'] == 0:
optimizer.step()
optimizer.zero_grad()
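            # Assuming partition_graphs returns exactly n_batch partitions,
            # this condition fires only on the last partition, so a single
            # accumulated optimizer step is applied per epoch.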
dur.append(time.time() - t0)
# switch model into evaluation mode
# so we don't update the gradients using the validation data
model.eval()
with torch.no_grad():
# push validation through network
val_logits = model(val_X)
val_loss = cross_entropy(val_logits, val_Y)
# compute validation performance
val_softmax = F.softmax(val_logits, dim=1)
# accuracy
_, val_indices = torch.max(val_softmax, dim=1)
val_acc = (val_indices == val_Y).sum() / val_Y.shape[0]
train_loss /= TRAIN_PARAMS['n_batch']
train_acc /= TRAIN_PARAMS['n_batch']
# Show current performance
print("Epoch {:05d} | Time(s) {:.4f} | Train Loss {:.4f} | Train Acc {:.4f} | Val Loss {:.4f} | Val Acc {:.4f}".format(epoch,
np.mean(dur),
train_loss.item(),
train_acc.item(),
val_loss.item(),
val_acc.item()))
progress['Epoch'].append(epoch)
if epoch > 3:
progress['Duration'].append(time.time() - t0)
else:
progress['Duration'].append(0)
# update training performance
progress['Train Loss'].append(train_loss.item())
progress['Train Acc'].append(train_acc.item())
# update validation performance
progress['Val Loss'].append(val_loss.item())
progress['Val Acc'].append(val_acc.item())
# set up early stopping criteria on validation loss
early_stop = stopper.step(val_loss.detach().data, model)
if early_stop:
break
##### MODEL SAVING #####
# - - - - - - - - - - - - #
# - - - - - - - - - - - - #
model_output = '%s%s.pt' % (out_dir, schema['model'])
model.save(filename=model_output)
# save performance to json
performance_output = '%sperformance.%s.json' % (out_dir, schema['model'])
with open(performance_output, "w") as outparams:
json.dump(progress, outparams, ensure_ascii=True, indent=4, sort_keys=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='MLP')
parser.add_argument('--schema-file',
type=str,
help='JSON file with parameters for model, training, and output.')
parser.add_argument('--existing',
help='Load pre-existing model to continue training.',
action='store_true',
required=False)
args = parser.parse_args()
main(args)
| mit |
CDSFinance/zipline | zipline/examples/buyapple.py | 11 | 2079 | #!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from zipline.api import order, record, symbol
def initialize(context):
pass
def handle_data(context, data):
order(symbol('AAPL'), 10)
record(AAPL=data[symbol('AAPL')].price)
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
# Plot the portfolio and asset data.
ax1 = plt.subplot(211)
results.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('Portfolio value (USD)')
ax2 = plt.subplot(212, sharex=ax1)
results.AAPL.plot(ax=ax2)
ax2.set_ylabel('AAPL price (USD)')
# Show the plot.
plt.gcf().set_size_inches(18, 8)
plt.show()
# Note: this if-block should be removed if running
# this algorithm on quantopian.com
if __name__ == '__main__':
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.utils.factory import load_from_yahoo
# Set the simulation start and end dates
start = datetime(2014, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2014, 11, 1, 0, 0, 0, 0, pytz.utc)
# Load price data from yahoo.
data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start,
end=end)
# Create and run the algorithm.
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data,
identifiers=['AAPL'])
results = algo.run(data)
analyze(results=results)
| apache-2.0 |
mavenlin/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 88 | 31139 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
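# Example (hypothetical shapes):
#   _get_in_out_shape((100, 8), (100,), n_classes=3, batch_size=32)
#   -> ([32, 8], [32, 3], 32)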
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be None or same type as y. In case `y`
      is `dict` (or an iterable which returns dict), `n_classes[key]` gives the
      number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
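# Example (hypothetical array):
#   x = np.arange(10).reshape(5, 2)
#   setup_predict_data_feeder(x, batch_size=2)  ->  [x[0:2], x[2:4], x[4:6]]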
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
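# Example: for a pandas Series s = pd.Series([10, 20, 30]), _access(s, [0, 2])
# goes through s.iloc and returns the values at positions 0 and 2; a plain
# numpy array falls back to data[iloc].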
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
      x: One feature sample which can be either an Nd numpy matrix of shape
`[n_samples, n_features, ...]` or dictionary of Nd numpy matrix.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else \
dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if x_is_dict else check_array(y, y.dtype)
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
else _check_dtype(self._x.dtype)
# note: self._output_dtype = np.float32 when y is None
self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
else _check_dtype(self._y.dtype) if y is not None else np.float32
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
Streaming data feeder allows to read data as it comes it from disk or
somewhere else. It's custom to have this iterators rotate infinetly over
the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
classes regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set to
        `None`, it is assumed that the iterator returns already batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate, so use an
        int value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
| apache-2.0 |
dinesharanathunga/firewallconfigdecryptor | setup.py | 1 | 2554 | '''
firewallconfigdecryptor: firewall configuration parsing tool
Note that "python setup.py test" invokes pytest on the package. With appropriately
configured setup.cfg, this will check both xxx_test modules and docstrings.
Copyright 2014, dinesha ranathunga.
Licensed under MIT.
'''
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
# This is a plug-in for setuptools that will invoke py.test
# when you run python setup.py test
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest # import here, because outside the required eggs aren't loaded yet
sys.exit(pytest.main(self.test_args))
version = "0.1"
setup(name="firewallconfigdecryptor",
version=version,
description="firewall configuration parsing tool",
# List of packages that this one depends upon:
install_requires = [
'netaddr==0.7.10',
'networkx>=1.7',
'ipaddr==2.1.11',
#'numpy==1.9.2',
'configobj==4.7.0',
#'pyparsing==2.0.3',
#'pytest==2.7.0',
#'six==1.9.0',
#'pytz',
#'pyparsing',
#'freetype',
#'png',
#'matplotlib==1.4.3',
],
classifiers=[ # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 1 - Planning',
        'Programming Language :: Python',
'Intended Audience :: Science/Research',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Topic :: System :: Networking',
'Topic :: Scientific/Engineering :: Mathematics',
],
keywords="", # Separate with spaces
author="dinesha ranathunga",
author_email="[email protected]",
url="",
license="MIT",
packages=find_packages(exclude=['examples', 'tests']),
include_package_data=True,
zip_safe=True,
download_url = (""),
tests_require=['pytest'],
cmdclass={'test': PyTest},
# TODO: List executable scripts, provided by the package (this is just an example)
entry_points = {
'console_scripts': [
'firewallconfigdecryptor = firewallconfigdecryptor.console_script:console_entry',
],
}
)
| mit |
franticspider/phagea | python/PyPhagea.py | 1 | 20781 | """
Copyright (C) 2013-2015 Simon Hickinbotham, Hywl Williams, Susan Stepney
This file is part of Phagea.
Phagea is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Phagea is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Phagea. If not, see <http://www.gnu.org/licenses/>.
"""
"""
#need to
# export JAVA_HOME=/usr/lib/jvm/default-java
#before running
#import jpype
"""
#
from jpype import *# JPackage, startJVM, shutdownJVM
#TODO: change this path...
#classpath = "-Djava.class.path=/home/sjh/Desktop/sjh/phagea/workspace/PhagEA/bin/"
#classpath = "-Djava.class.path=/home/sjh/workspace/PhagEApype/jar/"
#Specify the path to the phagea java classes:
classpath = "-Djava.class.path=/home/sjh/git/phagea/src/"
#Specify the path to the jvm:
jvmpath = "/usr/local/java/jdk1.8.0_25/jre/lib/amd64/server/libjvm.so"#"/usr/lib/jvm/default-java/jre/lib/i386/client/libjvm.so"
#classpath = "-Djava.class.path='.'"
#"-ea -Djava.class.path=C:\\Documents and Settings\\Sydney\\Desktop\\jpypeTest\\"
startJVM(jvmpath,"-ea",classpath)
# test: access the basic java functions
java.lang.System.out.println("JPype has connected to the JVM successfully (I think)")
import matplotlib.pyplot as plt
##########################################################
# TESTING NETWORKX
import networkx as nx
import matplotlib.cm as cmx
import matplotlib.colors as colors
import numpy as np
from numpy.core.numeric import zeros
""" there's a sqrt error in plot - this can trap it: """
#np.seterr(invalid='raise')
"""
print '======================================='
print '==== TRYING COMMENTED-OUT NKP CODE ===='
nkpc = JClass("com.phagea.nkpLandscape")
nkp = nkpc(8,3,0.0)
nodes = nkp.getNodes()
edges = nkp.getEdges()
score = nkp.getScores()
G=nx.Graph()
G.add_nodes_from(nodes)
b = np.array(edges)
for i in range(len(b)):
for j in range(len(b[i])):
if b[i][j] == 1:
G.add_edge(i,j)
#Select the layout
pos=nx.spring_layout(G,iterations=2000)
#Get the colormap for the range of scores:
jet = cm = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=nkp.getScoreMin(), vmax=nkp.getScoreMax())
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
colorList = []
for i in range(len(score)):
colorVal = scalarMap.to_rgba(score[i])
colorList.append(colorVal)
#Do something with the node sizes
ss = np.array(score)
for i in range(len(ss)):
ss[i] = 30 + (12*(1+ss[i]))
##nx.draw_networkx(G, pos=None, ax=None, node_size=[ss[v] for v in G])
#nx.draw_networkx(G, pos, with_labels=False, ax=None, node_color = colorList, node_size = 10)
#plt.xlim(-0.05,1.05)
#plt.ylim(-0.05,1.05)
#plt.axis('off')
#plt.savefig('random_geometric_graph.png')
#plt.show()
"""
##########################################################
#TODO: Not sure if I need this:
#import sys
testPkg = JPackage("com").phagea
Test = testPkg.TestPype
Test.speak("hi")
t = Test()
print t.getString()
print '=============================='
print '====STARTING PHAGEA PROPER===='
print ''
""" Create class variables that we'll get the objects from """
cfc = JClass("com.phagea.phageaConfig")
plc = JClass("com.phagea.phageaLandscape")
"""
Let's define a function that runs a simulation and plots it, given only the config and the image name
"""
def plotsim(cf,outfn):
#load the landscape
pl = plc(cf)
ndims = pl.getNdims();
if ndims == 1:
#this used to work, but no longer!
#mylandscape = JArray(JFloat,1)(pl.get1DLandscape())
#nowadays, we can do it like this:
landscape = pl.get1DLandscape()
#pl.get1DLandscape()
else:
if ndims == 2:
landscape = pl.get2DLandscape()
else:
print 'Ndims = ',ndims, ' ...no landscape created yet (in python)'
#cf.setInitFit(pl.getNdims(),pl.getMaxValue(),pl.getMinValue())
pec = JClass("com.phagea.phageaEngine")
pe = pec(cf,pl)
pe.print_stats(0)
pe.runAlgorithm()
#DO THE PLOTTING NOW:
plt.figure()
#Data for the subtitle:
w = cf.getw()
Rgamma = cf.getrGamma()
Repsilon = cf.getrEpsilon()
Theta = cf.getTheta()
if ndims == 1:
print "Preparing 1D plot"
pst = 'W= %f'%cf.getw()+' Rgamma= %f'%cf.getrGamma()+' Repsilon= %f'%cf.getrEpsilon()+' Theta= %f'%cf.getTheta()
plt.suptitle(pst)
print 'Plotting Cell Histogram'
plt.subplot(231)
CellHist = pe.getCellHist()
plt.imshow(CellHist, origin = 'lower' ,aspect='auto', label='cell')
print 'Plotting Phage Histogram'
plt.subplot(232)
PhageHist = pe.getPhageHist()
plt.imshow(PhageHist, origin = 'lower' ,aspect='auto', label='phage')
print 'Plotting Landscape'
plt.subplot(233)
FitMap = pl.get1DLandscape()
nbin = cf.getNbin()
xx = zeros(nbin)
print 'nbin= ',nbin,' FitMap size = ',len(FitMap)
for i in range (0,nbin):
xx[i] = i
plt.plot(FitMap,xx,label = "fitness")
print 'Plotting Cell Popdy'
plt.subplot(234)
cellpopdy = pe.getCellPopdy()
plt.plot(cellpopdy)
print 'Plotting Phage Popdy'
plt.subplot(235)
phagepopdy = pe.getPhagePopdy()
plt.plot(phagepopdy)
print 'Plotting Resource Dynamics'
plt.subplot(236)
rdy = pe.getRDy()
plt.plot(rdy)
elif ndims == 2:
print "Preparing 2D plot"
pst = 'w='+'{:.1e}'.format(w)+' Rgamma='+"{:.1e}".format(Rgamma)+'\n Repsilon='+"{:.1e}".format(Repsilon)+' theta='+"{:.1e}".format(Theta)
plt.suptitle(pst)
print 'Plotting Cell Histogram'
plt.subplot(231)
CellHist = pe.getCellHist()
plt.imshow(CellHist, origin = 'lower' ,aspect='auto', label='cell')
print 'Plotting Phage Histogram'
plt.subplot(232)
PhageHist = pe.getPhageHist()
plt.imshow(PhageHist, origin = 'lower' ,aspect='auto', label='phage')
print 'Plotting Landscape'
plt.subplot(233)
plt.imshow(landscape, origin = 'lower' ,aspect='auto', label='landscape')
print 'Plotting Cell Popdy'
plt.subplot(234)
cellpopdy = pe.getCellPopdy()
plt.plot(cellpopdy)
print 'Plotting Phage Popdy'
plt.subplot(235)
phagepopdy = pe.getPhagePopdy()
plt.plot(phagepopdy)
print 'Plotting Resource Dynamics'
plt.subplot(236)
rdy = pe.getRDy()
plt.plot(rdy)
else:
print "TODO: Prepare N-Dimensional plot"
#newname = cfgfile.replace('.cfg', '.png')
plt.savefig(outfn)
#newname = cfgfile.replace('.cfg', '.pdf')
#plt.savefig(newname,format='pdf')
return
print '=================================='
print '==== STARTING NEW PHAGEA 2D EXPTS ===='
#Load the config
cfgfile = sys.argv[1]
cf = cfc(cfgfile)
#CREATE THE DIFFERENT CONFIGURATIONS WE ARE INVESTIGATING
cf.setRandInit(False)
cf.setRescaling(False)
cf.setReplenish(False)
plotsim(cf,"/home/sjh/Desktop/Dropbox/Hywl/2015/runs/outfilenRFFF.png")
#CREATE THE DIFFERENT CONFIGURATIONS WE ARE INVESTIGATING
cf.setRandInit(False)
cf.setRescaling(False)
cf.setReplenish(True)
plotsim(cf,"/home/sjh/Desktop/Dropbox/Hywl/2015/runs/outfilenRFFT.png")
#CREATE THE DIFFERENT CONFIGURATIONS WE ARE INVESTIGATING
cf.setRandInit(False)
cf.setRescaling(True)
cf.setReplenish(False)
plotsim(cf,"/home/sjh/Desktop/Dropbox/Hywl/2015/runs/outfilenRFTF.png")
#CREATE THE DIFFERENT CONFIGURATIONS WE ARE INVESTIGATING
cf.setRandInit(False)
cf.setRescaling(True)
cf.setReplenish(True)
plotsim(cf,"/home/sjh/Desktop/Dropbox/Hywl/2015/runs/outfilenRFTT.png")
cf.setRandInit(True)
cf.setRescaling(False)
cf.setReplenish(False)
plotsim(cf,"/home/sjh/Desktop/Dropbox/Hywl/2015/runs/outfilenRTFF.png")
#CREATE THE DIFFERENT CONFIGURATIONS WE ARE INVESTIGATING
cf.setRandInit(True)
cf.setRescaling(False)
cf.setReplenish(True)
plotsim(cf,"/home/sjh/Desktop/Dropbox/Hywl/2015/runs/outfilenRTFT.png")
#CREATE THE DIFFERENT CONFIGURATIONS WE ARE INVESTIGATING
cf.setRandInit(True)
cf.setRescaling(True)
cf.setReplenish(False)
plotsim(cf,"/home/sjh/Desktop/Dropbox/Hywl/2015/runs/outfilenRTTF.png")
#CREATE THE DIFFERENT CONFIGURATIONS WE ARE INVESTIGATING
cf.setRandInit(True)
cf.setRescaling(True)
cf.setReplenish(True)
plotsim(cf,"/home/sjh/Desktop/Dropbox/Hywl/2015/runs/outfilenRTTT.png")
"""
#load the landscape
pl = plc(cf)
ndims = pl.getNdims();
if ndims == 1:
#this used to work, but no longer!
#mylandscape = JArray(JFloat,1)(pl.get1DLandscape())
#nowadays, we can do it like this:
landscape = pl.get1DLandscape()
#pl.get1DLandscape()
else:
if ndims == 2:
landscape = pl.get2DLandscape()
else:
print 'Ndims = ',ndims, ' ...no landscape created yet (in python)'
#cf.setInitFit(pl.getNdims(),pl.getMaxValue(),pl.getMinValue())
pec = JClass("com.phagea.phageaEngine")
pe = pec(cf,pl)
pe.print_stats(0)
pe.runAlgorithm()
#DO THE PLOTTING NOW:
plt.figure()
#Data for the subtitle:
w = cf.getw()
Rgamma = cf.getrGamma()
Repsilon = cf.getrEpsilon()
Theta = cf.getTheta()
if ndims == 1:
print "Preparing 1D plot"
pst = 'W= %f'%cf.getw()+' Rgamma= %f'%cf.getrGamma()+' Repsilon= %f'%cf.getrEpsilon()+' Theta= %f'%cf.getTheta()
plt.suptitle(pst)
print 'Plotting Cell Histogram'
plt.subplot(231)
CellHist = pe.getCellHist()
plt.imshow(CellHist, origin = 'lower' ,aspect='auto', label='cell')
print 'Plotting Phage Histogram'
plt.subplot(232)
PhageHist = pe.getPhageHist()
plt.imshow(PhageHist, origin = 'lower' ,aspect='auto', label='phage')
print 'Plotting Landscape'
plt.subplot(233)
FitMap = pl.get1DLandscape()
nbin = cf.getNbin()
xx = zeros(nbin)
print 'nbin= ',nbin,' FitMap size = ',len(FitMap)
for i in range (0,nbin):
xx[i] = i
plt.plot(FitMap,xx,label = "fitness")
print 'Plotting Cell Popdy'
plt.subplot(234)
cellpopdy = pe.getCellPopdy()
plt.plot(cellpopdy)
print 'Plotting Phage Popdy'
plt.subplot(235)
phagepopdy = pe.getPhagePopdy()
plt.plot(phagepopdy)
print 'Plotting Resource Dynamics'
plt.subplot(236)
rdy = pe.getRDy()
plt.plot(rdy)
elif ndims == 2:
print "Preparing 2D plot"
pst = 'w='+'{:.1e}'.format(w)+' Rgamma='+"{:.1e}".format(Rgamma)+'\n Repsilon='+"{:.1e}".format(Repsilon)+' theta='+"{:.1e}".format(Theta)
plt.suptitle(pst)
print 'Plotting Cell Histogram'
plt.subplot(231)
CellHist = pe.getCellHist()
plt.imshow(CellHist, origin = 'lower' ,aspect='auto', label='cell')
print 'Plotting Phage Histogram'
plt.subplot(232)
PhageHist = pe.getPhageHist()
plt.imshow(PhageHist, origin = 'lower' ,aspect='auto', label='phage')
print 'Plotting Landscape'
plt.subplot(233)
plt.imshow(landscape, origin = 'lower' ,aspect='auto', label='landscape')
print 'Plotting Cell Popdy'
plt.subplot(234)
cellpopdy = pe.getCellPopdy()
plt.plot(cellpopdy)
print 'Plotting Phage Popdy'
plt.subplot(235)
phagepopdy = pe.getPhagePopdy()
plt.plot(phagepopdy)
print 'Plotting Resource Dynamics'
plt.subplot(236)
rdy = pe.getRDy()
plt.plot(rdy)
else:
print "TODO: Prepare N-Dimensional plot"
newname = cfgfile.replace('.cfg', '.png')
plt.savefig(newname)
newname = cfgfile.replace('.cfg', '.pdf')
plt.savefig(newname,format='pdf')
#plt.show()
shutdownJVM()
#==========================================================================================================================
print '=================================='
print '==== STARTING OLD PHAGEA 2D EXPTS ===='
#pl = plc("CONE")
#pl = plc("SOMBRERO")
#pl = plc("THREEHILL")
pl = plc("RASTRIGIN")
print 'landscape type = '+pl.getTypeString()
# //javastuff:
# phageaConfig config = new phageaConfig();
# config.setStandardParams();
cf = cfc(pl.getNdims())
resc = cf.getRescaling()
#//java:
#phageaLandscape landscape = new phageaLandscape(config.type.name(), config.rescaling);
cf.setStandardParams()
cf.setPhageCount(0)
Sombland = JArray(JFloat,2)(pl.get2DLandscape(cf.getRescaling()))
import matplotlib.pyplot as plt
plt.figure(2)
plt.imshow(Sombland, origin = 'lower' ,aspect='auto', label='cell')
plt.show()
#########################################################
#from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np
bignum = 100
mat = Sombland #np.random.random((bignum, bignum))
X, Y = np.mgrid[:bignum, :bignum]
fig = plt.figure()
ax = fig.add_subplot(1,1,1, projection='3d')
surf = ax.plot_surface(X,Y,mat)
plt.show()
##########################################################
print '=================================='
print '==== FINISHED PHAGEA 2D EXPTS ===='
#Mimicing the java call sequence:
# /** Load what we are doing from config */
# phageaConfig config = new phageaConfig(args[0]);
# String name = config.getTypeName();
cfgfile = sys.argv[1]
cf = cfc(cfgfile)
typen = cf.getTypeName()
#
# phageaLandscape landscape = new phageaLandscape(name);
#
#pl = plc(cf)
pl = plc(cf,2)
# landscape.findNKPMaxMin();
pl.findNKPMaxMin();
# we'll need some variables for nkp
G = []
pos = []
plotNKP = False
if typen in ['NKP']:
if cf.getNKPN() < 14:
plotNKP = True
ndims = pl.getNdims();
if ndims == 1:
landscape = JArray(JFloat,1)(pl.get1DLandscape())
else:
if ndims == 2:
landscape = JArray(JFloat,2)(pl.get2DLandscape())
else:
if plotNKP:
landscape = JArray(JFloat,1)(pl.get1DnkpLandscape())
cf.setInitFit(pl.getNdims(),pl.getMaxValue(),pl.getMinValue())
pec = JClass("com.phagea.phageaEngine")
pe = pec(cf,pl)
#pe.print_stats(0)
pe.runAlgorithm()
print 'Replenished ' , pe.getReplenCount() , 'times'
#=======================
if plotNKP:
nodes = JArray(JInt,1)(pl.getNKPNodes())
score = JArray(JFloat,1)(pl.getNKPScores())
edges = JArray(JInt,2)(pl.getNKPEdges())
G=nx.Graph()
G.add_nodes_from(nodes)
b = np.array(edges)
for i in range(len(b)):
for j in range(len(b[i])):
if b[i][j] == 1:
G.add_edge(i,j)
#import math
#kval = 1.5/math.sqrt(len(nodes))
pos=nx.spring_layout(G)#,k=kval)
#so far, we can't get the graphviz layouts for high N to work...
#import pygraphviz
#from networkx import graphviz_layout
#print "networkx version "+nx.__version__
#pos=nx.graphviz_layout(G,"sfdp")
w = cf.getw()
Rgamma = cf.getrGamma()
Repsilon = cf.getrEpsilon()
Theta = cf.getTheta()
if(ndims <=2):
CellHist = JArray(JInt,2)(pe.getCellHist())
PhageHist = JArray(JInt,2)(pe.getPhageHist())
if(plotNKP):
CellHist = JArray(JInt,2)(pe.getCellHist())
PhageHist = JArray(JInt,2)(pe.getPhageHist())
#PhageHist = pe.getPhageHist()
#cpRatioHist = pe.getcpRatioHist()
#nbin = cf.getNbin()
#xx = zeros(nbin)
#for i in range (0,nbin):
# xx[i] = i
#FitMap = pe.getFitMap()
cellpopdy = JArray(JInt,1)(pe.getCellPopdy())
phagepopdy = JArray(JInt,1)(pe.getPhagePopdy())
rdy = JArray(JFloat,1)(pe.getRDy())
#Reprint the configuration for checking
print("")
print("=====================================")
print("CONFIGURATION:")
cfgstr = cf.printConfig()
print cfgstr
print("=====================================")
print("")
plt.figure(2)
mynodesize = 40
###CREATE THE GRAPHIC FOR THE HOST EVOLUTION OVER THE LANDSCAPE###
#pst = 'w='+'{:.1e}'.format(w)+' Rgamma='+"{:.1e}".format(Rgamma)+'\n Repsilon='+"{:.1e}".format(Repsilon)+' theta='+"{:.1e}".format(Theta)
pst = 'N= %d'%cf.getNKPN()+' K= %d'%cf.getNKPK()+' P='+"%0.2f"%cf.getNKPP()+'\n sigma^2='+"{:.1e}".format(cf.getSigma2())
plt.suptitle(pst)
plt.subplot(231)
if plotNKP:
#plt.ylim(0,pl.getNKPdim())
#plt.ylim(0,pl.getNKPdim())
###try to draw the network
jet = cm = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=0, vmax=cf.getT())
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
colorListh = []
sizeListh = []
for i in range(len(CellHist)):
colorVal = scalarMap.to_rgba(CellHist[i][0])
colorListh.append(colorVal)
sizeVal = 2 * mynodesize * CellHist[i][0] / cf.getT()
sizeListh.append(sizeVal)
nx.draw_networkx(G, pos, with_labels=False, linewidths=0, ax=None, node_color = colorListh, node_size = sizeListh)
plt.xlim(-0.05,1.05)
plt.ylim(-0.05,1.05)
plt.axis('off')
else:
if(ndims <= 2):
plt.imshow(CellHist, origin = 'lower' ,aspect='auto', label='cell')
plt.subplot(232)
if plotNKP:
#plt.ylim(0,pl.getNKPdim())
###try to draw the network
jet = cm = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=0, vmax=cf.getT())
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
colorListp = []
sizeListp = []
for i in range(len(score)):
colorVal = scalarMap.to_rgba(PhageHist[i][0])
colorListp.append(colorVal)
sizeVal = 2 * mynodesize * PhageHist[i][0] / cf.getT()
sizeListp.append(sizeVal)
nx.draw_networkx(G, pos, with_labels=False, linewidths=0, ax=None, node_color = colorListp, node_size = sizeListp)
plt.xlim(-0.05,1.05)
plt.ylim(-0.05,1.05)
plt.axis('off')
else:
if(ndims <= 2):
plt.imshow(PhageHist, origin = 'lower' ,aspect='auto', label='phage')
plt.subplot(233)
### PLOT THE LANDSCAPE - DIFFERENT STRATEGIES FOR DIFFERENT DIMENSIONS...
if ndims == 1:
FitMap = JArray(JFloat,1)(pl.get1DLandscape())
nbin = cf.getNbin()
xx = zeros(nbin)
print 'nbin= ',nbin,' FitMap size = ',len(FitMap)
for i in range (0,nbin):
xx[i] = i
plt.plot(FitMap,xx,label = "fitness")
else:
if ndims == 2:
plt.imshow(landscape, origin = 'lower' ,aspect='auto', label='landscape')
else:
if plotNKP:
###Assume we have an NKP Landscape for now
#nbin = len(landscape)
#xx = zeros(nbin)
#for i in range (0,nbin):
# xx[i] = i
#plt.ylim(0,nbin)
#plt.plot(landscape,xx,label = "fitness")
###try to draw the network
jet = cm = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=pl.getNKPScoreMin(), vmax=pl.getNKPScoreMax())
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=jet)
colorList = []
sizeList = []
for i in range(len(score)):
colorVal = scalarMap.to_rgba(score[i])
colorList.append(colorVal)
sizeVal = 2 * mynodesize * (score[i]-min(score)) / (max(score)-min(score))
sizeList.append(sizeVal)
nx.draw_networkx(G, pos, with_labels=False, linewidths=0, ax=None, node_color = colorList, node_size = sizeList)
plt.xlim(-0.05,1.05)
plt.ylim(-0.05,1.05)
plt.axis('off')
#plt.savefig('random_geometric_graph.png')
#plt.show()
#plt.imshow(landscape, origin = 'lower' ,aspect='auto', label='landscape')
#plt.imshow(cpRatioHist, origin = 'lower' ,aspect='auto', label='phage/cell ratio')
#plt.subplot(244)
#plt.plot(FitMap,xx,label = "fitness")
plt.subplot(234)
###To set the y axis range, uncomment this:
#plt.ylim((600,1600))
plt.plot(cellpopdy)
plt.subplot(235)
plt.plot(phagepopdy)
plt.subplot(236)
###uncomment this if you want to look at resource dynamics:
#plt.plot(rdy)
fmax = JArray(JFloat,1)(pe.getFMaxT())
fmin = JArray(JFloat,1)(pe.getFMinT())
plt.plot(fmax)
plt.plot(fmin)
if cf.getRescaling():
obsmax = JArray(JFloat,1)(pe.getObsMaxT())
obsmin = JArray(JFloat,1)(pe.getObsMinT())
plt.plot(obsmax)
plt.plot(obsmin)
#plt.savefig("fig.png")
#plt.plot(fmax)
#plt.plot(fmin)
plt.show()
"""
#shutdownJVM()
| gpl-3.0 |
vivekmishra1991/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
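# A small usage sketch (hypothetical 3-node path graph, not exercised by the
# test suite):
#   A = np.array([[0., 1., 0.], [1., 0., 1.], [0., 1., 0.]])
#   graph_laplacian(A)               # D - A -> [[1,-1,0],[-1,2,-1],[0,-1,1]]
#   graph_laplacian(A, normed=True)  # I - D^(-1/2) A D^(-1/2)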
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
nanditav/15712-TensorFlow | tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 82 | 6157 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
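# Hypothetical usage sketch (the DataFrame below is illustrative, not part of
# this module or its tests):
#   import pandas as pd
#   df = pd.DataFrame({"a": [1., 2., 3.], "b": [4., 5., 6.]})
#   source = PandasSource(df, batch_size=2)
#   source._output_names   # ("index", "a", "b")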
| apache-2.0 |
maciekcc/tensorflow | tensorflow/contrib/timeseries/examples/predict.py | 69 | 5579 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
def structural_ensemble_train_and_predict(csv_file_name):
# Cycle between 5 latent values over a period of 100. This leads to a very
# smooth periodic component (and a small model), which is a good fit for our
# example data. Modeling high-frequency periodic variations will require a
# higher cycle_num_latent_values.
structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=100, num_features=1, cycle_num_latent_values=5)
return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
# An autoregressive model, with periodicity handled as a time-based
# regression. Note that this requires windows of size 16 (input_window_size +
# output_window_size) for training.
ar = tf.contrib.timeseries.ARRegressor(
periodicities=100, input_window_size=10, output_window_size=6,
num_features=1,
# Use the (default) normal likelihood loss to adaptively fit the
# variance. SQUARED_LOSS overestimates variance when there are trends in
# the series.
loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
"""A simple example of training and predicting."""
# Read data in the default "time,value" CSV format with no header
reader = tf.contrib.timeseries.CSVReader(csv_file_name)
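  # Illustrative only: each row of such a CSV is expected to look like
  #   1,0.32
  #   2,0.35
  # i.e. an integer timestamp followed by the observed value.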
# Set up windowing and batching for training
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=training_steps)
# Evaluate on the full dataset sequentially, collecting in-sample predictions
# for a qualitative evaluation. Note that this loads the whole dataset into
# memory. For quantitative evaluation, use RandomWindowChunker.
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=200)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit):
"""Plot a time series in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Structural ensemble",
*structural_ensemble_train_and_predict(FLAGS.input_filename))
make_plot("AR", *ar_train_and_predict(FLAGS.input_filename))
pyplot.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
required=True,
help="Input csv file.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
cmeessen/fatiando | gallery/seismic/convolutional_model.py | 6 | 1804 | r"""
Synthetic seismograms using the convolutional model
---------------------------------------------------
The simplest way to get a seismogram (in time x offset) is through the
convolutional model
.. math::
trace(t) = wavelet(t) \ast reflectivity(t)
Module :mod:`fatiando.seismic.conv` defines functions for doing this
convolution, calculating the required reflectivity, and converting a model
from depth to time.
"""
import numpy as np
import matplotlib.pyplot as plt
from fatiando.seismic import conv
from fatiando.vis import mpl
# Define the parameters of our depth model
n_samples, n_traces = [600, 100]
velocity = 1500*np.ones((n_samples, n_traces))
# We'll put two interfaces in depth
velocity[150:, :] = 2000
velocity[400:, :] = 3500
dt = 2e-3
# We need to convert the depth model we made above into time
vel_l = conv.depth_2_time(velocity, velocity, dt=dt, dz=1)
# and we'll assume the density is homogeneous
rho_l = 2200*np.ones(np.shape(vel_l))
# With that, we can calculate the reflectivity model in time
rc = conv.reflectivity(vel_l, rho_l)
# and finally perform our convolution
synt = conv.convolutional_model(rc, 30, conv.rickerwave, dt=dt)
# We can use the utility function in fatiando.vis.mpl to plot the seismogram
fig, axes = plt.subplots(1, 2, figsize=(8, 5))
ax = axes[0]
ax.set_title("Velocity model (in depth)")
tmp = ax.imshow(velocity, extent=[0, n_traces, n_samples, 0],
cmap="copper", aspect='auto', origin='upper')
fig.colorbar(tmp, ax=ax, pad=0, aspect=50)
ax.set_xlabel('Trace')
ax.set_ylabel('Depth (m)')
ax = axes[1]
ax.set_title("Synthetic seismogram")
mpl.seismic_wiggle(synt[:, ::20], dt, scale=1)
mpl.seismic_image(synt, dt, cmap="RdBu_r", aspect='auto')
ax.set_xlabel('Trace')
ax.set_ylabel('Time (s)')
plt.tight_layout()
plt.show()
| bsd-3-clause |
AIML/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 227 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
fengz10/ICN_SCM | resultsPlot/plotCostCacheRatio.py | 1 | 5727 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
ratio = np.arange(0, 1.01, 0.05)
# Data from AS 7018
# Other inputs are: main(7018, ratio, 3, 0.7, 0)
#cost7018Algo0= [0.32816886868289885, 0.2208175601847247, 0.19251093813725606, 0.19213511824943674, 0.13080090885015863, 0.11537045883303, 0.09633102587606322, 0.07323609921126092, 0.09878431485204239, 0.05596993118756234, 0.049715428600860076, 0.03831044043548079, 0.06096882335052019, 0.028069296806645244, 0.022701113201547628, 0.021278104632073535, 0.013507052814121217, 0.026153265780405686, 0.0077527277733091485, 0.0031476608134015918, 0.0]
#cost7018Algo1= [0.4642333921367071, 0.4639016472999131, 0.34012817677104124, 0.3340296081862053, 0.2969448732402059, 0.2461004974625, 0.2293186323481248, 0.24091214725010093, 0.19757595572768757, 0.1573243340317513, 0.18058477722058466, 0.12481420007040704, 0.10774395529769458, 0.09029783974164379, 0.09634065693940172, 0.08223281518431566, 0.09567750993009355, 0.04277884030146324, 0.028210440967304917, 0.017676653196543032, 0.004847344478943003]
cost7018Algo0= [0.2688624303959933, 0.230281717027491, 0.18411789823954128, 0.1588118516922253, 0.12096749983086721, 0.1113147673120107, 0.1030153423862696, 0.09073808693426455, 0.0694635759217686, 0.05730782846758849, 0.046710212601724366, 0.048981939171668, 0.04710864836168703, 0.031022713107087735, 0.029085479109200608, 0.02071990211759934, 0.01666252096967843, 0.010496896997432531, 0.008803391094234036, 0.003640309725101591, 0.0]
cost7018Algo1= [0.5487645391435545, 0.44189141471566334, 0.38568493049488173, 0.4030654170364792, 0.31721279641850014, 0.3272248867582743, 0.2916132651694123, 0.26046316542276504, 0.19867071620188387, 0.20522911767970617, 0.1809405986969883, 0.17916122132774306, 0.14308778463715827, 0.12360898556019714, 0.11913902854109173, 0.08162315898761406, 0.07484068337659229, 0.05497196024192774, 0.03447745192320582, 0.019541786345988885, 0.0063849960404094115]
max7018 = max(cost7018Algo0+ cost7018Algo1)
cost7018Algo0 = np.array(cost7018Algo0)/max7018
cost7018Algo1 = np.array(cost7018Algo1)/max7018
cost3356Algo0= [0.2783048731245389, 0.2872432864239341, 0.2230858096827762, 0.2056502418089665, 0.19798701702638456, 0.17357493305249336, 0.1469459407732046, 0.10761056872908284, 0.0997999691042518, 0.09213445289884156, 0.092158282436989, 0.0660532265325018, 0.06295908184506546, 0.048358314991151494, 0.04045873505958472, 0.0315771474462309, 0.02590217889984478, 0.01820272272729274, 0.012580744472094682, 0.007715350715371399, 0.0]
cost3356Algo1= [0.465620584858123, 0.40481693415398573, 0.3875690687641232, 0.43758852738211623, 0.3165725611715618, 0.3024823915734399, 0.26220346926993093, 0.2474230687700476, 0.24382857244792552, 0.24630388944790402, 0.1923745910236931, 0.1700306921862092, 0.16211949681867985, 0.16401967368367437, 0.1757433455069511, 0.10131859386229136, 0.11147997172614468, 0.0849289440996695, 0.0715091535328963, 0.06055701261597678, 0.02902765654616049]
max3356 = max(cost3356Algo0+ cost3356Algo1)
cost3356Algo0 = np.array(cost3356Algo0)/max3356
cost3356Algo1 = np.array(cost3356Algo1)/max3356
#########################Calculate reduced ratio############################
avg7018Alg0 = sum(cost7018Algo0)/len(cost7018Algo0)
avg7018Alg1 = sum(cost7018Algo1)/len(cost7018Algo1)
print 'cost reduced ratio of 7018'
print 'reduced ratio = ', (avg7018Alg1 - avg7018Alg0)/avg7018Alg1
avg3356Alg0 = sum(cost3356Algo0)/len(cost3356Algo0)
avg3356Alg1 = sum(cost3356Algo1)/len(cost3356Algo1)
print 'cost reduced ratio of 3356'
print 'reduced ratio = ', (avg3356Alg1 - avg3356Alg0)/avg3356Alg1
#######################################################################
cost2914Algo0= [0.2985484020511996, 0.3678774231683738, 0.22471345430585563, 0.16939513694727085, 0.16470375883534655, 0.1873554643571797, 0.12180635895053124, 0.08917217445175339, 0.09418009720668301, 0.08602255944435719, 0.06227133969465918, 0.05742111452629443, 0.08499951928274836, 0.04435617393119315, 0.03557660115877804, 0.04948879687046496, 0.027409612835835533, 0.01611151133230808, 0.011949694759354836, 0.005433847749368355, 0.0]
cost2914Algo1= [0.4797497500434348, 0.36931227813986284, 0.3535122401694411, 0.3566576719805158, 0.38640821560707106, 0.716213466629919, 0.2935314839639993, 0.39111587647765933, 0.25428746739263486, 0.2130828579687878, 0.20757244130193064, 0.17380156664179708, 0.2115447151728378, 0.15569996960790217, 0.12753165995647436, 0.16329856974988097, 0.1115194699256139, 0.10639665060119938, 0.0932164906718553, 0.09090296502695513, 0.29667733078299985]
max2914 = max(cost2914Algo0+ cost2914Algo1)
cost2914Algo0 = np.array(cost2914Algo0)/max2914
cost2914Algo1 = np.array(cost2914Algo1)/max2914
# Comment
plt.plot(ratio, cost7018Algo0, "kD-", label="SCM-base AS7018", markersize = 8, linewidth=2)
plt.plot(ratio, cost7018Algo1, "ro--", label="Baseline AS7018", markersize = 8, linewidth=2)
plt.plot(ratio, cost3356Algo0, "b^-",label="SCM-base AS3356", markersize = 8, linewidth=2)
plt.plot(ratio, cost3356Algo1, "gv--",label="Baseline AS3356", markersize = 8, linewidth=2)
#plt.plot(ratio, cost2914Algo0, "g<-", label ="SCM-base AS2914", markersize = 8, linewidth=2)
#plt.plot(ratio, cost2914Algo1, "g>--", label ="Baseline AS2914", markersize = 8, linewidth=2)
plt.xticks(np.arange(0,1.1,0.1),('0', '0.1', '0.2', '0.3', '0.4', '0.5', \
'0.6', '0.7', '0.8', '0.9', '1.0'), fontsize = 14)
plt.yticks(np.arange(0, 1.1, 0.2), ('0', '0.2', '0.4', '0.6', '0.8', '1.0'), fontsize = 14)
plt.ylim([0, 1.03])
plt.xlabel('Replicate ratio (%)',fontsize = 14)
plt.ylabel('Normalized cost',fontsize = 14)
plt.legend(fontsize = 14)
#pylab.legend(loc='upper right')
plt.tight_layout()
plt.show() | gpl-2.0 |
astocko/statsmodels | statsmodels/genmod/tests/test_glm.py | 6 | 37718 | """
Test functions for models.GLM
"""
from statsmodels.compat import range
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_raises,
assert_allclose, assert_, assert_array_less, dec)
from scipy import stats
import statsmodels.api as sm
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.tools.tools import add_constant
from statsmodels.tools.sm_exceptions import PerfectSeparationError
from statsmodels.discrete import discrete_model as discrete
from nose import SkipTest
import warnings
# Test Precisions
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0
try:
import matplotlib.pyplot as plt #makes plt available for test functions
have_matplotlib = True
except:
have_matplotlib = False
pdf_output = False
if pdf_output:
from matplotlib.backends.backend_pdf import PdfPages
pdf = PdfPages("test_glm.pdf")
else:
pdf = None
def close_or_save(pdf, fig):
if pdf_output:
pdf.savefig(fig)
plt.close(fig)
def teardown_module():
if have_matplotlib:
plt.close('all')
if pdf_output:
pdf.close()
class CheckModelResultsMixin(object):
'''
res2 should be either the results from RModelWrap
or the results as defined in model_results_data
'''
decimal_params = DECIMAL_4
def test_params(self):
assert_almost_equal(self.res1.params, self.res2.params,
self.decimal_params)
decimal_bse = DECIMAL_4
def test_standard_errors(self):
assert_almost_equal(self.res1.bse, self.res2.bse, self.decimal_bse)
decimal_resids = DECIMAL_4
def test_residuals(self):
resids = np.column_stack((self.res1.resid_pearson,
self.res1.resid_deviance, self.res1.resid_working,
self.res1.resid_anscombe, self.res1.resid_response))
assert_almost_equal(resids, self.res2.resids, self.decimal_resids)
decimal_aic_R = DECIMAL_4
def test_aic_R(self):
        # R counts the estimation of the scale as a lost degree of freedom
        # (it does not for Gamma, hence the conditional below)
if self.res1.scale != 1:
dof = 2
else:
dof = 0
assert_almost_equal(self.res1.aic+dof, self.res2.aic_R,
self.decimal_aic_R)
decimal_aic_Stata = DECIMAL_4
def test_aic_Stata(self):
# Stata uses the below llf for aic definition for these families
if isinstance(self.res1.model.family, (sm.families.Gamma,
sm.families.InverseGaussian)):
llf = self.res1.model.family.loglike(self.res1.model.endog,
self.res1.mu, scale=1)
aic = (-2*llf+2*(self.res1.df_model+1))/self.res1.nobs
else:
aic = self.res1.aic/self.res1.nobs
assert_almost_equal(aic, self.res2.aic_Stata, self.decimal_aic_Stata)
decimal_deviance = DECIMAL_4
def test_deviance(self):
assert_almost_equal(self.res1.deviance, self.res2.deviance,
self.decimal_deviance)
decimal_scale = DECIMAL_4
def test_scale(self):
assert_almost_equal(self.res1.scale, self.res2.scale,
self.decimal_scale)
decimal_loglike = DECIMAL_4
def test_loglike(self):
# Stata uses the below llf for these families
# We differ with R for them
if isinstance(self.res1.model.family, (sm.families.Gamma,
sm.families.InverseGaussian)):
llf = self.res1.model.family.loglike(self.res1.model.endog,
self.res1.mu, scale=1)
else:
llf = self.res1.llf
assert_almost_equal(llf, self.res2.llf, self.decimal_loglike)
decimal_null_deviance = DECIMAL_4
def test_null_deviance(self):
assert_almost_equal(self.res1.null_deviance, self.res2.null_deviance,
self.decimal_null_deviance)
decimal_bic = DECIMAL_4
def test_bic(self):
assert_almost_equal(self.res1.bic, self.res2.bic_Stata,
self.decimal_bic)
def test_degrees(self):
assert_equal(self.res1.model.df_resid,self.res2.df_resid)
decimal_fittedvalues = DECIMAL_4
def test_fittedvalues(self):
assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
self.decimal_fittedvalues)
def test_tpvalues(self):
# test comparing tvalues and pvalues with normal implementation
# make sure they use normal distribution (inherited in results class)
params = self.res1.params
tvalues = params / self.res1.bse
pvalues = stats.norm.sf(np.abs(tvalues)) * 2
half_width = stats.norm.isf(0.025) * self.res1.bse
conf_int = np.column_stack((params - half_width, params + half_width))
assert_almost_equal(self.res1.tvalues, tvalues)
assert_almost_equal(self.res1.pvalues, pvalues)
assert_almost_equal(self.res1.conf_int(), conf_int)
class CheckComparisonMixin(object):
def test_compare_discrete(self):
res1 = self.res1
resd = self.resd
assert_allclose(res1.llf, resd.llf, rtol=1e-10)
score_obs1 = res1.model.score_obs(res1.params)
score_obsd = resd.model.score_obs(resd.params)
assert_allclose(score_obs1, score_obsd, rtol=1e-10)
# score
score1 = res1.model.score(res1.params)
assert_allclose(score1, score_obs1.sum(0), atol=1e-20)
assert_allclose(score1, np.zeros(score_obs1.shape[1]), atol=1e-7)
hessian1 = res1.model.hessian(res1.params, observed=False)
hessiand = resd.model.hessian(resd.params)
assert_allclose(hessian1, hessiand, rtol=1e-10)
hessian1 = res1.model.hessian(res1.params, observed=True)
hessiand = resd.model.hessian(resd.params)
assert_allclose(hessian1, hessiand, rtol=1e-9)
def test_score_test(self):
res1 = self.res1
# fake example, should be zero, k_constraint should be 0
st, pv, df = res1.model.score_test(res1.params, k_constraints=1)
assert_allclose(st, 0, atol=1e-20)
assert_allclose(pv, 1, atol=1e-10)
assert_equal(df, 1)
st, pv, df = res1.model.score_test(res1.params, k_constraints=0)
assert_allclose(st, 0, atol=1e-20)
assert_(np.isnan(pv), msg=repr(pv))
assert_equal(df, 0)
# TODO: no verified numbers largely SMOKE test
exog_extra = res1.model.exog[:,1]**2
st, pv, df = res1.model.score_test(res1.params, exog_extra=exog_extra)
assert_array_less(0.1, st)
assert_array_less(0.1, pv)
assert_equal(df, 1)
class TestGlmGaussian(CheckModelResultsMixin):
def __init__(self):
'''
Test Gaussian family with canonical identity link
'''
# Test Precisions
self.decimal_resids = DECIMAL_3
self.decimal_params = DECIMAL_2
self.decimal_bic = DECIMAL_0
self.decimal_bse = DECIMAL_3
from statsmodels.datasets.longley import load
self.data = load()
self.data.exog = add_constant(self.data.exog, prepend=False)
self.res1 = GLM(self.data.endog, self.data.exog,
family=sm.families.Gaussian()).fit()
from .results.results_glm import Longley
self.res2 = Longley()
def test_compare_OLS(self):
res1 = self.res1
# OLS doesn't define score_obs
from statsmodels.regression.linear_model import OLS
resd = OLS(self.data.endog, self.data.exog).fit()
self.resd = resd # attach to access from the outside
assert_allclose(res1.llf, resd.llf, rtol=1e-10)
score_obs1 = res1.model.score_obs(res1.params, scale=None)
score_obsd = resd.resid[:, None] / resd.scale * resd.model.exog
# low precision because of badly scaled exog
assert_allclose(score_obs1, score_obsd, rtol=1e-8)
score_obs1 = res1.model.score_obs(res1.params, scale=1)
score_obsd = resd.resid[:, None] * resd.model.exog
assert_allclose(score_obs1, score_obsd, rtol=1e-8)
hess_obs1 = res1.model.hessian(res1.params, scale=None)
hess_obsd = -1. / resd.scale * resd.model.exog.T.dot(resd.model.exog)
# low precision because of badly scaled exog
assert_allclose(hess_obs1, hess_obsd, rtol=1e-8)
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# Gauss = r.gaussian
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm, family=Gauss)
# self.res2.resids = np.array(self.res2.resid)[:,None]*np.ones((1,5))
# self.res2.null_deviance = 185008826 # taken from R. Rpy bug?
class TestGaussianLog(CheckModelResultsMixin):
def __init__(self):
# Test Precision
self.decimal_aic_R = DECIMAL_0
self.decimal_aic_Stata = DECIMAL_2
self.decimal_loglike = DECIMAL_0
self.decimal_null_deviance = DECIMAL_1
nobs = 100
x = np.arange(nobs)
np.random.seed(54321)
# y = 1.0 - .02*x - .001*x**2 + 0.001 * np.random.randn(nobs)
self.X = np.c_[np.ones((nobs,1)),x,x**2]
self.lny = np.exp(-(-1.0 + 0.02*x + 0.0001*x**2)) +\
0.001 * np.random.randn(nobs)
GaussLog_Model = GLM(self.lny, self.X, \
family=sm.families.Gaussian(sm.families.links.log))
self.res1 = GaussLog_Model.fit()
from .results.results_glm import GaussianLog
self.res2 = GaussianLog()
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed"
# GaussLogLink = r.gaussian(link = "log")
# GaussLog_Res_R = RModel(self.lny, self.X, r.glm, family=GaussLogLink)
# self.res2 = GaussLog_Res_R
class TestGaussianInverse(CheckModelResultsMixin):
def __init__(self):
# Test Precisions
self.decimal_bic = DECIMAL_1
self.decimal_aic_R = DECIMAL_1
self.decimal_aic_Stata = DECIMAL_3
self.decimal_loglike = DECIMAL_1
self.decimal_resids = DECIMAL_3
nobs = 100
x = np.arange(nobs)
np.random.seed(54321)
y = 1.0 + 2.0 * x + x**2 + 0.1 * np.random.randn(nobs)
self.X = np.c_[np.ones((nobs,1)),x,x**2]
self.y_inv = (1. + .02*x + .001*x**2)**-1 + .001 * np.random.randn(nobs)
InverseLink_Model = GLM(self.y_inv, self.X,
family=sm.families.Gaussian(sm.families.links.inverse_power))
InverseLink_Res = InverseLink_Model.fit()
self.res1 = InverseLink_Res
from .results.results_glm import GaussianInverse
self.res2 = GaussianInverse()
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# InverseLink = r.gaussian(link = "inverse")
# InverseLink_Res_R = RModel(self.y_inv, self.X, r.glm, family=InverseLink)
# self.res2 = InverseLink_Res_R
class TestGlmBinomial(CheckModelResultsMixin):
def __init__(self):
'''
Test Binomial family with canonical logit link using star98 dataset.
'''
self.decimal_resids = DECIMAL_1
self.decimal_bic = DECIMAL_2
from statsmodels.datasets.star98 import load
from .results.results_glm import Star98
data = load()
data.exog = add_constant(data.exog, prepend=False)
self.res1 = GLM(data.endog, data.exog, \
family=sm.families.Binomial()).fit()
#NOTE: if you want to replicate with RModel
#res2 = RModel(data.endog[:,0]/trials, data.exog, r.glm,
# family=r.binomial, weights=trials)
self.res2 = Star98()
#TODO:
#Non-Canonical Links for the Binomial family require the algorithm to be
#slightly changed
#class TestGlmBinomialLog(CheckModelResultsMixin):
# pass
#class TestGlmBinomialLogit(CheckModelResultsMixin):
# pass
#class TestGlmBinomialProbit(CheckModelResultsMixin):
# pass
#class TestGlmBinomialCloglog(CheckModelResultsMixin):
# pass
#class TestGlmBinomialPower(CheckModelResultsMixin):
# pass
#class TestGlmBinomialLoglog(CheckModelResultsMixin):
# pass
#class TestGlmBinomialLogc(CheckModelResultsMixin):
#TODO: need include logc link
# pass
class TestGlmBernoulli(CheckModelResultsMixin, CheckComparisonMixin):
def __init__(self):
from .results.results_glm import Lbw
self.res2 = Lbw()
self.res1 = GLM(self.res2.endog, self.res2.exog,
family=sm.families.Binomial()).fit()
modd = discrete.Logit(self.res2.endog, self.res2.exog)
self.resd = modd.fit(start_params=self.res1.params * 0.9, disp=False)
def score_test_r(self):
res1 = self.res1
res2 = self.res2
st, pv, df = res1.model.score_test(res1.params,
exog_extra=res1.model.exog[:, 1]**2)
st_res = 0.2837680293459376 # (-0.5326988167303712)**2
assert_allclose(st, st_res, rtol=1e-4)
st, pv, df = res1.model.score_test(res1.params,
exog_extra=res1.model.exog[:, 0]**2)
st_res = 0.6713492821514992 # (-0.8193590679009413)**2
assert_allclose(st, st_res, rtol=1e-4)
select = list(range(9))
select.pop(7)
res1b = GLM(res2.endog, res2.exog[:, select],
family=sm.families.Binomial()).fit()
tres = res1b.model.score_test(res1b.params,
exog_extra=res1.model.exog[:, -2])
tres = np.asarray(tres[:2]).ravel()
tres_r = (2.7864148487452, 0.0950667)
assert_allclose(tres, tres_r, rtol=1e-4)
cmd_r = """\
data = read.csv("...statsmodels\\statsmodels\\genmod\\tests\\results\\stata_lbw_glm.csv")
data["race_black"] = data["race"] == "black"
data["race_other"] = data["race"] == "other"
mod = glm(low ~ age + lwt + race_black + race_other + smoke + ptl + ht + ui, family=binomial, data=data)
options(digits=16)
anova(mod, test="Rao")
library(statmod)
s = glm.scoretest(mod, data["age"]**2)
s**2
s = glm.scoretest(mod, data["lwt"]**2)
s**2
"""
#class TestGlmBernoulliIdentity(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliLog(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliProbit(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliCloglog(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliPower(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliLoglog(CheckModelResultsMixin):
# pass
#class test_glm_bernoulli_logc(CheckModelResultsMixin):
# pass
class TestGlmGamma(CheckModelResultsMixin):
def __init__(self):
'''
Tests Gamma family with canonical inverse link (power -1)
'''
# Test Precisions
self.decimal_aic_R = -1 #TODO: off by about 1, we are right with Stata
self.decimal_resids = DECIMAL_2
from statsmodels.datasets.scotland import load
from .results.results_glm import Scotvote
data = load()
data.exog = add_constant(data.exog, prepend=False)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res1 = GLM(data.endog, data.exog,
family=sm.families.Gamma()).fit()
self.res1 = res1
# res2 = RModel(data.endog, data.exog, r.glm, family=r.Gamma)
res2 = Scotvote()
res2.aic_R += 2 # R doesn't count degree of freedom for scale with gamma
self.res2 = res2
class TestGlmGammaLog(CheckModelResultsMixin):
def __init__(self):
# Test Precisions
self.decimal_resids = DECIMAL_3
self.decimal_aic_R = DECIMAL_0
self.decimal_fittedvalues = DECIMAL_3
from .results.results_glm import CancerLog
res2 = CancerLog()
self.res1 = GLM(res2.endog, res2.exog,
family=sm.families.Gamma(link=sm.families.links.log)).fit()
self.res2 = res2
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
# family=r.Gamma(link="log"))
# self.res2.null_deviance = 27.92207137420696 # From R (bug in rpy)
# self.res2.bic = -154.1582089453923 # from Stata
class TestGlmGammaIdentity(CheckModelResultsMixin):
def __init__(self):
# Test Precisions
self.decimal_resids = -100 #TODO Very off from Stata?
self.decimal_params = DECIMAL_2
self.decimal_aic_R = DECIMAL_0
self.decimal_loglike = DECIMAL_1
from .results.results_glm import CancerIdentity
res2 = CancerIdentity()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.res1 = GLM(res2.endog, res2.exog,
family=sm.families.Gamma(link=sm.families.links.identity)).fit()
self.res2 = res2
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
# family=r.Gamma(link="identity"))
# self.res2.null_deviance = 27.92207137420696 # from R, Rpy bug
class TestGlmPoisson(CheckModelResultsMixin, CheckComparisonMixin):
def __init__(self):
'''
Tests Poisson family with canonical log link.
Test results were obtained by R.
'''
from .results.results_glm import Cpunish
from statsmodels.datasets.cpunish import load
self.data = load()
self.data.exog[:,3] = np.log(self.data.exog[:,3])
self.data.exog = add_constant(self.data.exog, prepend=False)
self.res1 = GLM(self.data.endog, self.data.exog,
family=sm.families.Poisson()).fit()
self.res2 = Cpunish()
# compare with discrete, start close to save time
modd = discrete.Poisson(self.data.endog, self.data.exog)
self.resd = modd.fit(start_params=self.res1.params * 0.9, disp=False)
#class TestGlmPoissonIdentity(CheckModelResultsMixin):
# pass
#class TestGlmPoissonPower(CheckModelResultsMixin):
# pass
class TestGlmInvgauss(CheckModelResultsMixin):
def __init__(self):
'''
Tests the Inverse Gaussian family in GLM.
Notes
-----
Used the rndivgx.ado file provided by Hardin and Hilbe to
generate the data. Results are read from model_results, which
were obtained by running R_ig.s
'''
# Test Precisions
self.decimal_aic_R = DECIMAL_0
self.decimal_loglike = DECIMAL_0
from .results.results_glm import InvGauss
res2 = InvGauss()
res1 = GLM(res2.endog, res2.exog, \
family=sm.families.InverseGaussian()).fit()
self.res1 = res1
self.res2 = res2
class TestGlmInvgaussLog(CheckModelResultsMixin):
def __init__(self):
# Test Precisions
self.decimal_aic_R = -10 # Big difference vs R.
self.decimal_resids = DECIMAL_3
from .results.results_glm import InvGaussLog
res2 = InvGaussLog()
self.res1 = GLM(res2.endog, res2.exog,
family=sm.families.InverseGaussian(link=\
sm.families.links.log)).fit()
self.res2 = res2
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
# family=r.inverse_gaussian(link="log"))
# self.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
# self.res2.llf = -12162.72308 # from Stata, R's has big rounding diff
class TestGlmInvgaussIdentity(CheckModelResultsMixin):
def __init__(self):
# Test Precisions
self.decimal_aic_R = -10 #TODO: Big difference vs R
self.decimal_fittedvalues = DECIMAL_3
self.decimal_params = DECIMAL_3
from .results.results_glm import Medpar1
data = Medpar1()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.res1 = GLM(data.endog, data.exog,
family=sm.families.InverseGaussian(
link=sm.families.links.identity)).fit()
from .results.results_glm import InvGaussIdentity
self.res2 = InvGaussIdentity()
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed."
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
# family=r.inverse_gaussian(link="identity"))
# self.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
# self.res2.llf = -12163.25545 # from Stata, big diff with R
class TestGlmNegbinomial(CheckModelResultsMixin):
def __init__(self):
'''
Test Negative Binomial family with canonical log link
'''
# Test Precision
self.decimal_resid = DECIMAL_1
self.decimal_params = DECIMAL_3
self.decimal_resids = -1 # 1 % mismatch at 0
self.decimal_fittedvalues = DECIMAL_1
from statsmodels.datasets.committee import load
self.data = load()
self.data.exog[:,2] = np.log(self.data.exog[:,2])
interaction = self.data.exog[:,2]*self.data.exog[:,1]
self.data.exog = np.column_stack((self.data.exog,interaction))
self.data.exog = add_constant(self.data.exog, prepend=False)
self.res1 = GLM(self.data.endog, self.data.exog,
family=sm.families.NegativeBinomial()).fit()
from .results.results_glm import Committee
res2 = Committee()
res2.aic_R += 2 # They don't count a degree of freedom for the scale
self.res2 = res2
# def setup(self):
# if skipR:
# raise SkipTest, "Rpy not installed"
# r.library('MASS') # this doesn't work when done in rmodelwrap?
# self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
# family=r.negative_binomial(1))
# self.res2.null_deviance = 27.8110469364343
#class TestGlmNegbinomial_log(CheckModelResultsMixin):
# pass
#class TestGlmNegbinomial_power(CheckModelResultsMixin):
# pass
#class TestGlmNegbinomial_nbinom(CheckModelResultsMixin):
# pass
#NOTE: hacked together version to test poisson offset
class TestGlmPoissonOffset(CheckModelResultsMixin):
@classmethod
def setupClass(cls):
from .results.results_glm import Cpunish
from statsmodels.datasets.cpunish import load
data = load()
data.exog[:,3] = np.log(data.exog[:,3])
data.exog = add_constant(data.exog, prepend=False)
exposure = [100] * len(data.endog)
cls.data = data
cls.exposure = exposure
cls.res1 = GLM(data.endog, data.exog, family=sm.families.Poisson(),
exposure=exposure).fit()
cls.res1.params[-1] += np.log(100) # add exposure back in to param
# to make the results the same
cls.res2 = Cpunish()
def test_missing(self):
# make sure offset is dropped correctly
endog = self.data.endog.copy()
endog[[2,4,6,8]] = np.nan
mod = GLM(endog, self.data.exog, family=sm.families.Poisson(),
exposure=self.exposure, missing='drop')
assert_equal(mod.exposure.shape[0], 13)
def test_offset_exposure(self):
# exposure=x and offset=log(x) should have the same effect
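        # With a log-link Poisson model the linear predictor is
        # X*beta + offset + log(exposure), so folding log(exposure) into the
        # offset must give an identical fit.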
np.random.seed(382304)
endog = np.random.randint(0, 10, 100)
exog = np.random.normal(size=(100,3))
exposure = np.random.uniform(1, 2, 100)
offset = np.random.uniform(1, 2, 100)
mod1 = GLM(endog, exog, family=sm.families.Poisson(),
offset=offset, exposure=exposure).fit()
offset2 = offset + np.log(exposure)
mod2 = GLM(endog, exog, family=sm.families.Poisson(),
offset=offset2).fit()
assert_almost_equal(mod1.params, mod2.params)
# test recreating model
mod1_ = mod1.model
kwds = mod1_._get_init_kwds()
assert_allclose(kwds['exposure'], exposure, rtol=1e-14)
assert_allclose(kwds['offset'], mod1_.offset, rtol=1e-14)
mod3 = mod1_.__class__(mod1_.endog, mod1_.exog, **kwds)
assert_allclose(mod3.exposure, mod1_.exposure, rtol=1e-14)
assert_allclose(mod3.offset, mod1_.offset, rtol=1e-14)
def test_predict(self):
np.random.seed(382304)
endog = np.random.randint(0, 10, 100)
exog = np.random.normal(size=(100,3))
exposure = np.random.uniform(1, 2, 100)
mod1 = GLM(endog, exog, family=sm.families.Poisson(),
exposure=exposure).fit()
exog1 = np.random.normal(size=(10,3))
exposure1 = np.random.uniform(1, 2, 10)
# Doubling exposure time should double expected response
pred1 = mod1.predict(exog=exog1, exposure=exposure1)
pred2 = mod1.predict(exog=exog1, exposure=2*exposure1)
assert_almost_equal(pred2, 2*pred1)
# Check exposure defaults
pred3 = mod1.predict()
pred4 = mod1.predict(exposure=exposure)
pred5 = mod1.predict(exog=exog, exposure=exposure)
assert_almost_equal(pred3, pred4)
assert_almost_equal(pred4, pred5)
# Check offset defaults
offset = np.random.uniform(1, 2, 100)
mod2 = GLM(endog, exog, offset=offset, family=sm.families.Poisson()).fit()
pred1 = mod2.predict()
pred2 = mod2.predict(offset=offset)
pred3 = mod2.predict(exog=exog, offset=offset)
assert_almost_equal(pred1, pred2)
assert_almost_equal(pred2, pred3)
# Check that offset shifts the linear predictor
mod3 = GLM(endog, exog, family=sm.families.Poisson()).fit()
offset = np.random.uniform(1, 2, 10)
pred1 = mod3.predict(exog=exog1, offset=offset, linear=True)
pred2 = mod3.predict(exog=exog1, offset=2*offset, linear=True)
assert_almost_equal(pred2, pred1+offset)
def test_prefect_pred():
cur_dir = os.path.dirname(os.path.abspath(__file__))
iris = np.genfromtxt(os.path.join(cur_dir, 'results', 'iris.csv'),
delimiter=",", skip_header=1)
y = iris[:,-1]
X = iris[:,:-1]
X = X[y != 2]
y = y[y != 2]
X = add_constant(X, prepend=True)
glm = GLM(y, X, family=sm.families.Binomial())
assert_raises(PerfectSeparationError, glm.fit)
def test_score_test_OLS():
# nicer example than Longley
from statsmodels.regression.linear_model import OLS
np.random.seed(5)
nobs = 100
sige = 0.5
x = np.random.uniform(0, 1, size=(nobs, 5))
x[:, 0] = 1
beta = 1. / np.arange(1., x.shape[1] + 1)
y = x.dot(beta) + sige * np.random.randn(nobs)
res_ols = OLS(y, x).fit()
res_olsc = OLS(y, x[:, :-2]).fit()
co = res_ols.compare_lm_test(res_olsc, demean=False)
res_glm = GLM(y, x[:, :-2], family=sm.families.Gaussian()).fit()
co2 = res_glm.model.score_test(res_glm.params, exog_extra=x[:, -2:])
# difference in df_resid versus nobs in scale see #1786
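    # the OLS LM test scales by nobs while the GLM score test uses
    # df_resid = 100 - 3 = 97 in the scale estimate, hence the factor below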
assert_allclose(co[0] * 97 / 100., co2[0], rtol=1e-13)
def test_attribute_writable_resettable():
# Regression test for mutables and class constructors.
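    # mutating family.link.power on one model instance must not leak into a
    # freshly constructed model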
data = sm.datasets.longley.load()
endog, exog = data.endog, data.exog
glm_model = sm.GLM(endog, exog)
assert_equal(glm_model.family.link.power, 1.0)
glm_model.family.link.power = 2.
assert_equal(glm_model.family.link.power, 2.0)
glm_model2 = sm.GLM(endog, exog)
assert_equal(glm_model2.family.link.power, 1.0)
class Test_start_params(CheckModelResultsMixin):
def __init__(self):
'''
Test Gaussian family with canonical identity link
'''
# Test Precisions
self.decimal_resids = DECIMAL_3
self.decimal_params = DECIMAL_2
self.decimal_bic = DECIMAL_0
self.decimal_bse = DECIMAL_3
from statsmodels.datasets.longley import load
self.data = load()
self.data.exog = add_constant(self.data.exog, prepend=False)
params = sm.OLS(self.data.endog, self.data.exog).fit().params
self.res1 = GLM(self.data.endog, self.data.exog,
family=sm.families.Gaussian()).fit(start_params=params)
from .results.results_glm import Longley
self.res2 = Longley()
def test_glm_start_params():
# see 1604
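    # build a tiny weighted binary dataset by repeating each row wt times,
    # then check that fit() honors the user-supplied start_params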
y2 = np.array('0 1 0 0 0 1'.split(), int)
wt = np.array([50,1,50,1,5,10])
y2 = np.repeat(y2, wt)
x2 = np.repeat([0,0,0.001,100,-1,-1], wt)
mod = sm.GLM(y2, sm.add_constant(x2), family=sm.families.Binomial())
res = mod.fit(start_params=[-4, -5])
np.testing.assert_almost_equal(res.params, [-4.60305022, -5.29634545], 6)
def test_loglike_no_opt():
# see 1728
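    # with maxiter=0 the model is evaluated at start_params without any
    # optimization, so res.llf must match the hand-computed Bernoulli
    # log-likelihood below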
y = np.asarray([0, 1, 0, 0, 1, 1, 0, 1, 1, 1])
x = np.arange(10, dtype=np.float64)
def llf(params):
lin_pred = params[0] + params[1]*x
pr = 1 / (1 + np.exp(-lin_pred))
return np.sum(y*np.log(pr) + (1-y)*np.log(1-pr))
for params in [0,0], [0,1], [0.5,0.5]:
mod = sm.GLM(y, sm.add_constant(x), family=sm.families.Binomial())
res = mod.fit(start_params=params, maxiter=0)
like = llf(params)
assert_almost_equal(like, res.llf)
def test_formula_missing_exposure():
# see 2083
import statsmodels.formula.api as smf
import pandas as pd
d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, np.nan],
'constant': [1] * 4, 'exposure' : np.random.uniform(size=4),
'x': [1, 3, 2, 1.5]}
df = pd.DataFrame(d)
family = sm.families.Gaussian(link=sm.families.links.log)
mod = smf.glm("Foo ~ Bar", data=df, exposure=df.exposure,
family=family)
assert_(type(mod.exposure) is np.ndarray, msg='Exposure is not ndarray')
exposure = pd.Series(np.random.uniform(size=5))
assert_raises(ValueError, smf.glm, "Foo ~ Bar", data=df,
exposure=exposure, family=family)
assert_raises(ValueError, GLM, df.Foo, df[['constant', 'Bar']],
exposure=exposure, family=family)
@dec.skipif(not have_matplotlib)
def test_plots():
np.random.seed(378)
n = 200
exog = np.random.normal(size=(n, 2))
lin_pred = exog[:, 0] + exog[:, 1]**2
prob = 1 / (1 + np.exp(-lin_pred))
endog = 1 * (np.random.uniform(size=n) < prob)
model = sm.GLM(endog, exog, family=sm.families.Binomial())
result = model.fit()
import matplotlib.pyplot as plt
import pandas as pd
from statsmodels.graphics.regressionplots import add_lowess
# array interface
for j in 0,1:
fig = result.plot_added_variable(j)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_partial_residuals(j)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_ceres_residuals(j)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
# formula interface
data = pd.DataFrame({"y": endog, "x1": exog[:, 0], "x2": exog[:, 1]})
model = sm.GLM.from_formula("y ~ x1 + x2", data, family=sm.families.Binomial())
result = model.fit()
for j in 0,1:
xname = ["x1", "x2"][j]
fig = result.plot_added_variable(xname)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_partial_residuals(xname)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
fig = result.plot_ceres_residuals(xname)
add_lowess(fig.axes[0], frac=0.5)
close_or_save(pdf, fig)
def gen_endog(lin_pred, family_class, link, binom_version=0):
np.random.seed(872)
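    # binom_version=0 draws 0/1 Bernoulli outcomes; binom_version=1 builds a
    # two-column (successes, failures) binomial endog with n=10 trials per row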
fam = sm.families
mu = link().inverse(lin_pred)
if family_class == fam.Binomial:
if binom_version == 0:
endog = 1*(np.random.uniform(size=len(lin_pred)) < mu)
else:
endog = np.empty((len(lin_pred), 2))
n = 10
endog[:, 0] = (np.random.uniform(size=(len(lin_pred), n)) < mu[:, None]).sum(1)
endog[:, 1] = n - endog[:, 0]
elif family_class == fam.Poisson:
endog = np.random.poisson(mu)
elif family_class == fam.Gamma:
endog = np.random.gamma(2, mu)
elif family_class == fam.Gaussian:
endog = mu + np.random.normal(size=len(lin_pred))
elif family_class == fam.NegativeBinomial:
from scipy.stats.distributions import nbinom
endog = nbinom.rvs(mu, 0.5)
elif family_class == fam.InverseGaussian:
from scipy.stats.distributions import invgauss
endog = invgauss.rvs(mu)
else:
raise ValueError
return endog
def test_summary():
"""
Smoke test for summary.
"""
np.random.seed(4323)
n = 100
exog = np.random.normal(size=(n, 2))
exog[:, 0] = 1
endog = np.random.normal(size=n)
for method in "irls", "cg":
fa = sm.families.Gaussian()
model = sm.GLM(endog, exog, family=fa)
rslt = model.fit(method=method)
s = rslt.summary()
def test_gradient_irls():
"""
Compare the results when using gradient optimization and IRLS.
"""
# TODO: Find working examples for inverse_squared link
np.random.seed(87342)
fam = sm.families
lnk = sm.families.links
families = [(fam.Binomial, [lnk.logit, lnk.probit, lnk.cloglog, lnk.log, lnk.cauchy]),
(fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]),
(fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]),
(fam.Gaussian, [lnk.identity, lnk.log, lnk.inverse_power]),
(fam.InverseGaussian, [lnk.log, lnk.identity, lnk.inverse_power, lnk.inverse_squared]),
(fam.NegativeBinomial, [lnk.log, lnk.inverse_power, lnk.inverse_squared, lnk.identity])]
n = 100
p = 3
exog = np.random.normal(size=(n, p))
exog[:, 0] = 1
for family_class, family_links in families:
for link in family_links:
for binom_version in 0,1:
if family_class != fam.Binomial and binom_version == 1:
continue
if (family_class, link) == (fam.Poisson, lnk.identity):
lin_pred = 20 + exog.sum(1)
elif (family_class, link) == (fam.Binomial, lnk.log):
lin_pred = -1 + exog.sum(1) / 8
elif (family_class, link) == (fam.Poisson, lnk.sqrt):
lin_pred = 2 + exog.sum(1)
elif (family_class, link) == (fam.InverseGaussian, lnk.log):
lin_pred = -1 + exog.sum(1)
elif (family_class, link) == (fam.InverseGaussian, lnk.identity):
lin_pred = 20 + 5*exog.sum(1)
lin_pred = np.clip(lin_pred, 1e-4, np.inf)
elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_squared):
lin_pred = 0.5 + exog.sum(1) / 5
continue # skip due to non-convergence
elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_power):
lin_pred = 1 + exog.sum(1) / 5
elif (family_class, link) == (fam.NegativeBinomial, lnk.identity):
lin_pred = 20 + 5*exog.sum(1)
lin_pred = np.clip(lin_pred, 1e-4, np.inf)
elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_squared):
lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
continue # skip due to non-convergence
elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_power):
lin_pred = 1 + exog.sum(1) / 5
else:
lin_pred = np.random.uniform(size=exog.shape[0])
endog = gen_endog(lin_pred, family_class, link, binom_version)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod_irls = sm.GLM(endog, exog, family=family_class(link=link))
rslt_irls = mod_irls.fit(method="IRLS")
# Try with and without starting values.
for max_start_irls, start_params in (0, rslt_irls.params), (1, None):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mod_gradient = sm.GLM(endog, exog, family=family_class(link=link))
rslt_gradient = mod_gradient.fit(max_start_irls=max_start_irls,
start_params=start_params,
method="newton")
assert_allclose(rslt_gradient.params,
rslt_irls.params, rtol=1e-6, atol=1e-6)
assert_allclose(rslt_gradient.llf, rslt_irls.llf,
rtol=1e-6, atol=1e-6)
assert_allclose(rslt_gradient.scale, rslt_irls.scale,
rtol=1e-6, atol=1e-6)
# Get the standard errors using expected information.
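                # IRLS reports standard errors based on the expected (Fisher)
                # information, so recompute the gradient fit's bse from the
                # expected Hessian before comparing.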
gradient_bse = rslt_gradient.bse
ehess = mod_gradient.hessian(rslt_gradient.params, observed=False)
gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))
assert_allclose(gradient_bse, rslt_irls.bse, rtol=1e-6, atol=1e-6)
if __name__=="__main__":
#run_module_suite()
#taken from Fernando Perez:
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb'],
exit=False)
| bsd-3-clause |
jiangzhonglian/MachineLearning | src/py2.x/ml/5.Logistic/sklearn_logisticRegression_demo.py | 1 | 7834 | #!/usr/bin/python
# coding: utf8
'''
Created on Oct 27, 2010
Update on 2017-05-18
Logistic Regression Working Module
Author: 小瑶
GitHub: https://github.com/apachecn/AiLearning
scikit-learn example reference: http://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
'''
# L1 Penalty and Sparsity in Logistic Regression
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
    # turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
    # coef_l1_LR contains zeros due to the
    # L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
'''
# Path with L1 - Logistic Regression
'''
print(__doc__)
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
'''
# Plot multinomial and One-vs-Rest Logistic Regression
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
# make a 3-class dataset for classification
centers = [[-5, 0], [0, 1.5], [5, -1]]
X, y = make_blobs(n_samples=1000, centers=centers, random_state=40)
transformation = [[0.4, 0.2], [-0.4, 1.2]]
X = np.dot(X, transformation)
for multi_class in ('multinomial', 'ovr'):
clf = LogisticRegression(solver='sag', max_iter=100, random_state=42,
multi_class=multi_class).fit(X, y)
    # print the training scores
print("training score : %.3f (%s)" % (clf.score(X, y), multi_class))
    # create a mesh to plot in
    h = .02  # step size in the mesh
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
    # Plot the decision boundary. For that, we will assign a color to each point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.title("Decision surface of LogisticRegression (%s)" % multi_class)
plt.axis('tight')
    # Plot also the training points
colors = "bry"
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired)
    # Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.show()
'''
from __future__ import print_function
# Logistic Regression 3-class Classifier
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
# we only take the first two features of the sample data
X = iris.data[:, :2]
Y = iris.target
h = .02  # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
# Logistic function
# This is similar to the sigmoid function covered in the earlier logistic regression lesson, which approximates a step function
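# sigmoid: sigma(z) = 1 / (1 + exp(-z)); the example below plots
# sigma(coef * x + intercept) next to an ordinary linear regression fit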
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set; it is just a straight line with some Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='red', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(range(-5, 10))
plt.yticks([0, 0.5, 1])
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.legend(('Logistic Regression Model', 'Linear Regression Model'),
loc="lower right", fontsize='small')
plt.show()
'''
| gpl-3.0 |
valexandersaulys/airbnb_kaggle_contest | prototype_alpha/gradientBoost_take3.py | 1 | 2967 | """
Take 2 on the GradientBoost, predicting for country_destinations.
Use labels in confusion_matrix(y_true,y_preds,labels=[]) to order
the labels in the confusion matrix to see whats overrepresented in the
target files for the Airbnb contest.
http://scikit-learn.org/stable/modules/generated/sklearn.metrics.confusion_matrix.html
'NDF' is over-represented, so I'm gonna drop it.
"""
import pandas as pd
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
# Helper to assemble the submission in the required format
def get_submission_format(IDs,ndfs,answers):
# IDs should be dataframe, ndfs & answers can be list/numpyarray
y_final = [];
for i in range(len(ndfs)):
if ndfs[i]==1: # Affirmative
y_final.append('NDF')
else:
y_final.append(answers[i]);
y_final = pd.DataFrame(y_final); # Check this, it might need to be reshaped
df = pd.concat([IDs,y_final],axis=1);
df.columns = ['id', 'country']
return df
training = pd.read_csv("protoAlpha_training.csv")
testing = pd.read_csv("protoAlpha_testing.csv")
X = training.iloc[:,1:-1]
y = training['country_destination']
x_train,x_valid,y_train,y_valid = train_test_split(X,y,test_size=0.3,random_state=None)
x_train['NDF'] = 0; x_valid['NDF'] = 0;
x_train.loc[y_train == 'NDF', 'NDF'] = 1  # y_train is a Series of destination labels
x_valid.loc[y_valid == 'NDF', 'NDF'] = 1
# Get the 'NDF' indicator column values
yn_train = x_train['NDF']
yn_valid = x_valid['NDF']
labels_order = np.unique(y_train.values)
# Drop the extra columns in x_train
x_train = x_train.iloc[:,:-1]
x_valid = x_valid.iloc[:,:-1]
# First train a classifier for 'NDF' vs everything else
from sklearn.ensemble import GradientBoostingClassifier
clf_one = GradientBoostingClassifier(n_estimators=10,verbose=100)
clf_one.fit(x_train,yn_train);
yn_preds = clf_one.predict(x_valid);
print( "Accuracy: %f" % accuracy_score(yn_valid,yn_preds) );
# Drop rows whose destination is 'NDF'
x_t = x_train[ y_train != 'NDF' ]
x_v = x_valid[ y_valid != 'NDF' ]
y_t = y_train[ y_train != 'NDF' ]
y_v = y_valid[ y_valid != 'NDF' ]
# Next, train a classifier for everything else
clf_two = GradientBoostingClassifier(n_estimators=70,verbose=10)
clf_two.fit(x_t,y_t)
y_p = clf_two.predict(x_v);
print( "Accuracy: %f" % accuracy_score(y_v,y_p) );
"""
# Full run-through for valid
ndf_answers = clf_one.predict(x_valid);
x_vld = x_valid[ndf_answers==1]
y_answers = clf_two.predict(x_vld);
"""
# Get the final testing data answer
x_test = testing.iloc[:,1:];
ndf_test = clf_one.predict(x_test);
# Predict a destination for every test row so indices line up with ndf_test;
# rows flagged as NDF get overwritten inside get_submission_format
y_answers = clf_two.predict(x_test);
numbahs = testing['id']
df = get_submission_format(numbahs, ndfs=ndf_test, answers=y_answers);
df.to_csv("gradientBoost_take3.csv",index=False)
| gpl-2.0 |
marcocaccin/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
poldrosky/alternar | wind/windRoseGeneral.py | 1 | 2130 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# windRoseGeneral.py
#
# Copyright 2015 Omar Ernesto Cabrera Rosero <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
# Use library windrose 1.5, https://github.com/scls19fr/windrose
import Pdbc
import time
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from math import pi
from windrose import WindroseAxes
t1 = time.time()
db = Pdbc.DBConnector('db', 'user', 'pass', 'localhost', 'port')
query = 'SELECT DISTINCT latitude, longitude FROM timeseries50 ORDER BY latitude, longitude LIMIT 1'
stations = db.resultQuery(query)
for station in stations:
ws = []
wd = []
query1 = 'SELECT * FROM timeseries50 WHERE latitude='+str(station[0])+' AND longitude='+str(station[1])+' ORDER BY timewind'
rows = db.resultQuery(query1)
for row in rows:
wd.append(row[6])
ws.append(row[7])
ws = np.array(ws)
wd = np.array(wd)
df = pd.DataFrame({"speed": ws, "direction": wd})
#windrose like a stacked histogram with normed (displayed in percent) results
ax = WindroseAxes.from_ax()
ax.bar(df.direction, df.speed, normed=True, opening=0.8, edgecolor='white')
ax.set_legend()
ax.set_title('Estación: '+str(station[0])+' , '+str(station[1]))
plt.show()
t2 = time.time()
print(t2-t1)
print('done')
| agpl-3.0 |
abhishekkrthakur/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
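    # Mistake-driven perceptron updates: whenever sign(w.x + b) != y for a
    # sample, nudge the hyperplane toward it with w += y*x and b += y.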
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
cameronlai/ml-class-python | skeletons/ex7/ex7_sklearn.py | 1 | 4158 | import numpy as np
import sys
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.cm as cm
import scipy.io as sio
from sklearn.cluster import KMeans
from ex7 import findClosestCentroids, computeCentroids
from ex7_utility import plotProgresskMeans
## Machine Learning Online Class - Exercise 7: Principle Component Analysis and K-Means Clustering
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# clustering exercise.
# You will need to complete a short section of code to perform
# clustering with scikit-learn library
#
def runkMeans_sklearn(X, initial_centroids = None, max_iters= 0, plot_progress = False, input_K = 0):
m, n = X.shape
if initial_centroids is None:
K = input_K
else:
K = initial_centroids.shape[0]
idx = np.zeros((m,1))
kmeans = None
# ============= YOUR CODE HERE =============
# Instructions: Perform K Means with sci-kit library
# Initialize with the given points
# If initial_centroids is an integer, then use random
# ===========================================
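    # A minimal sketch of one possible fill-in (left commented out so the
    # skeleton stays an exercise); it assumes scikit-learn's KMeans API:
    # if initial_centroids is None:
    #     kmeans = KMeans(n_clusters=K, init='random', n_init=1,
    #                     max_iter=max(max_iters, 1)).fit(X)
    # else:
    #     kmeans = KMeans(n_clusters=K, init=initial_centroids, n_init=1,
    #                     max_iter=max(max_iters, 1)).fit(X)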
if kmeans is None:
sys.exit('K Means model not initialized')
centroids = kmeans.cluster_centers_
idx = kmeans.labels_
if plot_progress:
plotProgresskMeans(X, centroids, initial_centroids, idx, K, max_iters)
return centroids, idx
if __name__ == "__main__":
plt.close('all')
plt.ion() # interactive mode
# ==================== Part 1: Perform K Means ====================
print('Finding closest centroids.')
data_file = '../../data/ex7/ex7data2.mat'
mat_content = sio.loadmat(data_file)
X = mat_content['X']
K = 3
initial_centroids = np.array([[3,3], [6,2], [8,5]])
idx = findClosestCentroids(X, initial_centroids)
print('Closest centroids for the first 3 examples:')
print(idx[:3].ravel());
print('(the closest centroids should be 0, 2, 1 respectively)')
raw_input('Program paused. Press enter to continue')
# =================== Part 2: Compute Means ===================
print('Computing centroids means.')
centroids = computeCentroids(X, idx, K);
print('Centroids computed after initial finding of closest centroids: ')
print(centroids);
print('(the centroids should be');
print(' [ 2.428301 3.157924 ]');
print(' [ 5.813503 2.633656 ]');
print(' [ 7.119387 3.616684 ]');
raw_input('Program paused. Press enter to continue')
# =================== Part 3: K-Means Clustering ===================
print('Running K-Means clustering on example dataset.')
max_iters = 10
print('K-means starting point')
plotProgresskMeans(X, initial_centroids, initial_centroids, idx, K, 0)
raw_input('Press enter to continue')
centroids, idx = runkMeans_sklearn(X, initial_centroids, max_iters, True)
print('K-Means Done.')
raw_input('Program paused. Press enter to continue')
# =================== Part 4: K-Means Clustering on Pixels ===================
print('Running K-Means clustering on pixels from an image.')
data_file = '../../data/ex7/bird_small.png'
A = mpimg.imread(data_file)
A = A / 255
img_size = A.shape
X = np.reshape(A, (img_size[0] * img_size[1], 3))
K = 16
max_iters = 10
centroids, idx = runkMeans_sklearn(X, max_iters=max_iters, input_K = K)
raw_input('Program paused. Press enter to continue')
# =================== Part 5: Image Compression ===================
print('Applying K-Means to compress an image.')
# Can use the idx trained from K Means instead of finding them again
#idx = findClosestCentroids(X, centroids)
X_recovered = centroids[idx,:]
X_recovered = np.reshape(X_recovered, (img_size[0], img_size[1], 3))
plt.figure()
plt.subplot(1,2,1)
plt.imshow(A * 255)
plt.title('Original')
plt.subplot(1,2,2)
plt.imshow(X_recovered * 255)
plt.title('Compressed, with %d colors.' % K)
raw_input('Program paused. Press enter to continue')
plt.close('all')
| mit |
astocko/statsmodels | statsmodels/datasets/macrodata/data.py | 25 | 3184 | """United States Macroeconomic data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
Compiled by Skipper Seabold. All data are from the Federal Reserve Bank of St.
Louis [1] except the unemployment rate which was taken from the National
Bureau of Labor Statistics [2]. ::
[1] Data Source: FRED, Federal Reserve Economic Data, Federal Reserve Bank of
St. Louis; http://research.stlouisfed.org/fred2/; accessed December 15,
2009.
[2] Data Source: Bureau of Labor Statistics, U.S. Department of Labor;
http://www.bls.gov/data/; accessed December 15, 2009.
"""
DESCRSHORT = """US Macroeconomic Data for 1959Q1 - 2009Q3"""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of Observations - 203
Number of Variables - 14
Variable name definitions::
year - 1959q1 - 2009q3
quarter - 1-4
realgdp - Real gross domestic product (Bil. of chained 2005 US$,
seasonally adjusted annual rate)
realcons - Real personal consumption expenditures (Bil. of chained
2005 US$, seasonally adjusted annual rate)
realinv - Real gross private domestic investment (Bil. of chained
2005 US$, seasonally adjusted annual rate)
realgovt - Real federal consumption expenditures & gross investment
(Bil. of chained 2005 US$, seasonally adjusted annual rate)
realdpi - Real private disposable income (Bil. of chained 2005
US$, seasonally adjusted annual rate)
cpi - End of the quarter consumer price index for all urban
consumers: all items (1982-84 = 100, seasonally adjusted).
m1 - End of the quarter M1 nominal money stock (Seasonally
adjusted)
tbilrate - Quarterly monthly average of the monthly 3-month
treasury bill: secondary market rate
unemp - Seasonally adjusted unemployment rate (%)
pop - End of the quarter total population: all ages incl. armed
               forces overseas
infl - Inflation rate (ln(cpi_{t}/cpi_{t-1}) * 400)
realint - Real interest rate (tbilrate - infl)
"""
from numpy import recfromtxt, column_stack, array
from pandas import DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath
def load():
"""
Load the US macro data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
The macrodata Dataset instance does not contain endog and exog attributes.
"""
data = _get_data()
names = data.dtype.names
dataset = Dataset(data=data, names=names)
return dataset
def load_pandas():
dataset = load()
dataset.data = DataFrame(dataset.data)
return dataset
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/macrodata.csv', 'rb'), delimiter=",",
names=True, dtype=float)
return data
| bsd-3-clause |
dato-code/SFrame | oss_src/unity/python/sframe/test/test_sframe.py | 5 | 134915 | '''
Copyright (C) 2016 Turi
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
# from nose import with_setup
from ..data_structures.sframe import SFrame
from ..data_structures.sarray import SArray
from ..data_structures.image import Image
from ..connect import main as glconnect
from ..util import _assert_sframe_equal, generate_random_sframe
from .. import _launch, load_sframe, aggregate
from . import util
import pandas as pd
from ..util.timezone import GMT
from pandas.util.testing import assert_frame_equal
import unittest
import datetime as dt
import tempfile
import os
import csv
import gzip
import string
import time
import numpy as np
import array
import math
import random
import shutil
import functools
import sys
import mock
import sqlite3
from .dbapi2_mock import dbapi2_mock
HAS_PYSPARK = True
try:
from pyspark import SparkContext, SQLContext
except:
HAS_PYSPARK = False
#######################################################
# Metrics tracking tests are in test_usage_metrics.py #
#######################################################
# Taken from http://stackoverflow.com/questions/1151658/python-hashable-dicts
# by Alex Martelli
class hashabledict(dict):
def __key(self):
return tuple((k,self[k]) for k in sorted(self))
def __hash__(self):
return hash(self.__key())
def __eq__(self, other):
return self.__key() == other.__key()
class SFrameTest(unittest.TestCase):
def setUp(self):
self.int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.float_data = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
self.string_data = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
self.a_to_z = [str(chr(97 + i)) for i in range(0, 26)]
self.dataframe = pd.DataFrame({'int_data': self.int_data, 'float_data': self.float_data, 'string_data': self.string_data})
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
self.int_data2 = range(50,60)
self.float_data2 = [1.0 * i for i in range(50,60)]
self.string_data2 = [str(i) for i in range(50,60)]
self.dataframe2 = pd.DataFrame({'int_data': self.int_data2, 'float_data': self.float_data2, 'string_data': self.string_data2})
self.vec_data = [array.array('d', [i, i+1]) for i in self.int_data]
self.list_data = [[i, str(i), i * 1.0] for i in self.int_data]
self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
self.datetime_data = [dt.datetime(2013, 5, 7, 10, 4, 10),
dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(0.0))]
self.all_type_cols = [self.int_data,
self.float_data,
self.string_data,
self.vec_data,
self.list_data,
self.dict_data,
self.datetime_data*5]
self.sf_all_types = SFrame({"X"+str(i[0]):i[1] for i in zip(range(1,8),
self.all_type_cols)})
# Taken from http://en.wikipedia.org/wiki/Join_(SQL) for fun.
self.employees_sf = SFrame()
self.employees_sf.add_column(SArray(['Rafferty','Jones','Heisenberg','Robinson','Smith','John']), 'last_name')
self.employees_sf.add_column(SArray([31,33,33,34,34,None]), 'dep_id')
# XXX: below are only used by one test!
self.departments_sf = SFrame()
self.departments_sf.add_column(SArray([31,33,34,35]), 'dep_id')
self.departments_sf.add_column(SArray(['Sales','Engineering','Clerical','Marketing']), 'dep_name')
def __assert_sarray_equal(self, sa1, sa2):
l1 = list(sa1)
l2 = list(sa2)
self.assertEquals(len(l1), len(l2))
for i in range(len(l1)):
v1 = l1[i]
v2 = l2[i]
if v1 == None:
self.assertEqual(v2, None)
else:
if type(v1) == dict:
self.assertEquals(len(v1), len(v2))
for key in v1:
self.assertTrue(key in v1)
self.assertEqual(v1[key], v2[key])
elif (hasattr(v1, "__iter__")):
self.assertEquals(len(v1), len(v2))
for j in range(len(v1)):
t1 = v1[j]; t2 = v2[j]
if (type(t1) == float):
if (math.isnan(t1)):
self.assertTrue(math.isnan(t2))
else:
self.assertEquals(t1, t2)
else:
self.assertEquals(t1, t2)
else:
self.assertEquals(v1, v2)
def test_split_datetime(self):
from_zone = GMT(0)
to_zone = GMT(4.5)
utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')
utc = utc.replace(tzinfo=from_zone)
central = utc.astimezone(to_zone)
sa = SArray([utc,central])
expected = SFrame()
expected ['X.year'] = [2011,2011]
expected ['X.month'] = [1,1]
expected ['X.day'] = [21,21]
expected ['X.hour'] = [2,7]
expected ['X.minute'] = [37,7]
expected ['X.second'] = [21,21]
expected ['X.tzone'] = [0.0,4.5]
result = sa.split_datetime(tzone=True)
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column names
expected = SFrame()
expected ['ttt.year'] = [2011,2011]
expected ['ttt.minute'] = [37,7]
expected ['ttt.second'] = [21,21]
result = sa.split_datetime(column_name_prefix='ttt',limit=['year','minute','second']);
self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
sf = SFrame({'datetime': sa})
result = sf.split_datetime('datetime', column_name_prefix='ttt',limit=['year','minute','second']);
self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
def __test_equal(self, sf, df):
self.assertEquals(sf.num_rows(), df.shape[0])
self.assertEquals(sf.num_cols(), df.shape[1])
assert_frame_equal(sf.to_dataframe(), df)
def __create_test_df(self, size):
int_data = []
float_data = []
string_data = []
for i in range(0,size):
int_data.append(i)
float_data.append(float(i))
string_data.append(str(i))
return pd.DataFrame({'int_data': int_data,
'float_data': float_data,
'string_data': string_data})
# Test if the rows are all the same...row order does not matter.
# (I do expect column order to be the same)
def __assert_join_results_equal(self, sf, expected_sf):
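        # delegate to _assert_sframe_equal, ignoring row order but still
        # requiring identical columns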
_assert_sframe_equal(sf, expected_sf, check_row_order=False)
def test_creation_from_dataframe(self):
# created from empty dataframe
sf_empty = SFrame(data=pd.DataFrame())
self.__test_equal(sf_empty, pd.DataFrame())
sf = SFrame(data=self.dataframe, format='dataframe')
self.__test_equal(sf, self.dataframe)
sf = SFrame(data=self.dataframe, format='auto')
self.__test_equal(sf, self.dataframe)
original_p = pd.DataFrame({'a':[1.0, float('nan')]})
effective_p = pd.DataFrame({'a':[1.0, None]})
sf = SFrame(data=original_p)
self.__test_equal(sf, effective_p)
original_p = pd.DataFrame({'a':['a',None,'b']})
sf = SFrame(data=original_p)
self.__test_equal(sf, original_p)
def test_auto_parse_csv(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:
df = pd.DataFrame({'float_data': self.float_data,
'int_data': self.int_data,
'string_data': self.a_to_z[:len(self.int_data)]})
df.to_csv(csvfile, index=False)
csvfile.close()
sf = SFrame.read_csv(csvfile.name, header=True)
self.assertEqual(sf.dtype(), [float, int, str])
self.__test_equal(sf, df)
def test_parse_csv(self):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:
self.dataframe.to_csv(csvfile, index=False)
csvfile.close()
# list type hints
sf = SFrame.read_csv(csvfile.name,
column_type_hints=[int, int, str])
self.assertEqual(sf.dtype(), [int, int, str])
sf['int_data'] = sf['int_data'].astype(int)
sf['float_data'] = sf['float_data'].astype(float)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, self.dataframe)
# list type hints, incorrect number of columns
self.assertRaises(RuntimeError,
lambda: SFrame.read_csv(csvfile.name,
column_type_hints=[int, float]))
# dictionary type hints
sf = SFrame.read_csv(csvfile.name,
column_type_hints={'int_data': int,
'float_data': float,
'string_data': str})
self.__test_equal(sf, self.dataframe)
# partial dictionary type hints
sf = SFrame.read_csv(csvfile.name,
column_type_hints={'float_data': float,
'string_data': str})
self.__test_equal(sf, self.dataframe)
# single value type hints
sf = SFrame.read_csv(csvfile.name, column_type_hints=str)
self.assertEqual(sf.dtype(), [str, str, str])
all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])
self.__test_equal(sf, all_string_column_df)
# single value type hints row limit
sf = SFrame.read_csv(csvfile.name, column_type_hints=str, nrows=5)
self.assertEqual(sf.dtype(), [str, str, str])
all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])
self.assertEqual(len(sf), 5)
self.__test_equal(sf, all_string_column_df[0:len(sf)])
sf = SFrame.read_csv(csvfile.name)
sf2 = SFrame(csvfile.name, format='csv')
self.__test_equal(sf2, sf.to_dataframe())
f = open(csvfile.name, "w")
f.write('a,b,c\n')
f.write('NA,PIKA,CHU\n')
f.write('1.0,2,3\n')
f.close()
sf = SFrame.read_csv(csvfile.name,
na_values=['NA','PIKA','CHU'],
column_type_hints={'a':float,'b':int,'c':str})
t = list(sf['a'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], 1.0)
t = list(sf['b'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], 2)
t = list(sf['c'])
self.assertEquals(t[0], None)
self.assertEquals(t[1], "3")
def test_save_load_file_cleanup(self):
# when some file is in use, file should not be deleted
with util.TempDirectory() as f:
sf = SFrame()
sf['a'] = SArray(range(1,1000000))
sf.save(f)
# many for each sarray, 1 sframe_idx, 1 object.bin, 1 ini
file_count = len(os.listdir(f))
self.assertTrue(file_count > 3);
# sf1 now references the on disk file
sf1 = SFrame(f);
# create another SFrame and save to the same location
sf2 = SFrame()
sf2['b'] = SArray([str(i) for i in range(1,100000)])
sf2['c'] = SArray(range(1, 100000))
sf2.save(f)
file_count = len(os.listdir(f))
self.assertTrue(file_count > 3);
# now sf1 should still be accessible
self.__test_equal(sf1, sf.to_dataframe())
# and sf2 is correct too
sf3 = SFrame(f)
self.__test_equal(sf3, sf2.to_dataframe())
# when sf1 goes out of scope, the tmp files should be gone
sf1 = 1
time.sleep(1) # give time for the files being deleted
file_count = len(os.listdir(f))
self.assertTrue(file_count > 3);
def test_save_load(self):
# Check top level load function, with no suffix
with util.TempDirectory() as f:
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f)
sf2 = load_sframe(f)
self.__test_equal(sf2, self.dataframe)
# Check individual formats with the SFrame constructor
formats = ['.csv']
for suffix in formats:
f = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f.name)
sf2 = SFrame(f.name)
sf2['int_data'] = sf2['int_data'].astype(int)
sf2['float_data'] = sf2['float_data'].astype(float)
sf2['string_data'] = sf2['string_data'].astype(str)
self.__test_equal(sf2, self.dataframe)
g=SArray([['a','b',3],[{'a':'b'}],[1,2,3]])
g2=SFrame()
g2['x']=g
g2.save(f.name)
g3=SFrame.read_csv(f.name,column_type_hints=list)
self.__test_equal(g2, g3.to_dataframe())
f.close()
os.unlink(f.name)
        # Make sure this file doesn't exist before testing
self.assertRaises(IOError, lambda: SFrame(data='__no_such_file__.frame_idx', format='sframe'))
if sys.platform != 'win32':
# Bad permission
test_dir = 'test_dir'
if os.path.exists(test_dir):
os.removedirs(test_dir)
os.makedirs(test_dir, mode=0000)
with self.assertRaises(IOError):
sf.save(os.path.join(test_dir, 'bad.frame_idx'))
# Permissions will affect this test first, so no need
# to write something here
with self.assertRaises(IOError):
sf2 = SFrame(os.path.join(test_dir, 'bad.frame_idx'))
# cleanup
os.removedirs(test_dir)
del sf2
def test_save_load_reference(self):
# Check top level load function, with no suffix
with util.TempDirectory() as f:
sf = SFrame(data=self.dataframe, format='dataframe')
originallen = len(sf)
sf.save(f)
del sf
sf = SFrame(f)
# add a new column (int_data + 1) and save a reference back
int_data2 = sf['int_data'] + 1
int_data2.__materialize__()
sf['int_data2'] = int_data2
sf._save_reference(f)
del sf
sf = SFrame(f)
self.assertTrue(((sf['int_data2'] - sf['int_data']) == 1).all())
# try to append and save reference
expected = sf.to_dataframe()
sf = sf.append(sf)
sf._save_reference(f)
sf = SFrame(f)
self.assertTrue(((sf['int_data2'] - sf['int_data']) == 1).all())
self.assertEquals(2 * originallen, len(sf))
assert_frame_equal(sf[originallen:].to_dataframe(), expected)
assert_frame_equal(sf[:originallen].to_dataframe(), expected)
def test_save_to_csv(self):
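# Round-trip the dataframe through CSV, exercising delimiter, line terminator,
# quoting level, and the pandas-style keyword aliases.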
f = tempfile.NamedTemporaryFile(suffix='.csv', delete=False)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f.name, format='csv')
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str})
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':')
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':')
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n')
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n')
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False)
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
self.__test_equal(sf2, self.dataframe)
import csv
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'', quote_level=csv.QUOTE_MINIMAL)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'', quote_level=csv.QUOTE_ALL)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
self.__test_equal(sf2, self.dataframe)
sf.export_csv(f.name, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'', quote_level=csv.QUOTE_NONE)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\r\n', double_quote=False, quote_char='\'')
self.__test_equal(sf2, self.dataframe)
# Pandas compatibility options
sf.export_csv(f.name, sep=':', lineterminator='\r\n', doublequote=False, quotechar='\'', quote_level=csv.QUOTE_NONE)
sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, sep=':', lineterminator='\r\n', doublequote=False, quotechar='\'')
self.__test_equal(sf2, self.dataframe)
f.close()
os.unlink(f.name)
def test_save_to_json(self):
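# Round-trip the dataframe through JSON via save(format='json'), export_json,
# and the line-oriented JSON format.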
f = tempfile.NamedTemporaryFile(suffix='.json', delete=False)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.save(f.name, format='json')
sf2 = SFrame.read_json(f.name)
# the float column will be parsed as integer
sf2['float_data'] = sf2['float_data'].astype(float)
self.__test_equal(sf2, self.dataframe)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.export_json(f.name)
sf2 = SFrame.read_json(f.name)
sf2['float_data'] = sf2['float_data'].astype(float)
self.__test_equal(sf2, self.dataframe)
sf = SFrame(data=self.dataframe, format='dataframe')
sf.export_json(f.name, orient='lines')
sf2 = SFrame.read_json(f.name, orient='lines')
sf2['float_data'] = sf2['float_data'].astype(float)
self.__test_equal(sf2, self.dataframe)
f.close()
os.unlink(f.name)
def _remove_sframe_files(self, prefix):
filelist = [ f for f in os.listdir(".") if f.startswith(prefix) ]
for f in filelist:
os.remove(f)
def test_creation_from_txt(self):
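# A .txt file and its gzipped copy should be parsed the same way as a CSV.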
f = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)
df = self.dataframe[['string_data']]
df.to_csv(f.name, index=False)
sf = SFrame(f.name)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, df)
fgzip = tempfile.NamedTemporaryFile(suffix='.txt.gz', delete=False)
f_in = open(f.name, 'rb')
f_out = gzip.open(fgzip.name, 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
sf = SFrame(fgzip.name)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, df)
fgzip.close()
os.unlink(fgzip.name)
f.close()
os.unlink(f.name)
def test_creation_from_csv_on_local(self):
if os.path.exists('./foo.csv'):
os.remove('./foo.csv')
with open('./foo.csv', 'w') as f:
url = f.name
basesf = SFrame(self.dataframe)
basesf.save(url, format="csv")
f.close()
sf = SFrame('./foo.csv')
self.assertEquals(sf['float_data'].dtype(), int)
sf['float_data'] = sf['float_data'].astype(float)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, self.dataframe)
sf = SFrame(url)
self.assertEquals(sf['float_data'].dtype(), int)
sf['float_data'] = sf['float_data'].astype(float)
self.assertEquals(sf['string_data'].dtype(), int)
sf['string_data'] = sf['string_data'].astype(str)
self.__test_equal(sf, self.dataframe)
os.remove(url)
def test_alternate_line_endings(self):
# test Windows line endings
if os.path.exists('./windows_lines.csv'):
os.remove('./windows_lines.csv')
windows_file_url = None
with open('./windows_lines.csv', 'w') as f:
windows_file_url = f.name
def_writer = csv.writer(f, dialect='excel')
column_list = ['numbers']
def_writer.writerow(column_list)
for i in self.int_data:
def_writer.writerow([i])
sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':int})
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.column_types(), [int])
self.assertEquals(list(sf['numbers'].head()), self.int_data)
sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':list}, error_bad_lines=False)
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.num_rows(), 0)
os.remove(windows_file_url)
def test_skip_rows(self):
# test line skipping
if os.path.exists('./skip_lines.csv'):
os.remove('./skip_lines.csv')
skip_file_url = None
with open('./skip_lines.csv', 'w') as f:
f.write("trash\n")
f.write("junk\n")
skip_file_url = f.name
def_writer = csv.writer(f, dialect='excel')
column_list = ['numbers']
def_writer.writerow(column_list)
for i in self.int_data:
def_writer.writerow([i])
sf = SFrame.read_csv('./skip_lines.csv', skiprows=2, column_type_hints={'numbers':int})
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.column_types(), [int])
self.assertEquals(list(sf['numbers'].head()), self.int_data)
sf = SFrame.read_csv('./skip_lines.csv', skiprows=2, column_type_hints={'numbers':list}, error_bad_lines=False)
self.assertEquals(sf.column_names(), column_list)
self.assertEquals(sf.num_rows(), 0)
os.remove(skip_file_url)
def test_creation_from_csv_on_http(self):
pass
# sf = SFrame(data=self.url, use_header=False)
# self.__test_equal(sf, pd.DataFrame({'1': self.a_to_z}))
def test_creation_from_csv_on_s3(self):
# Requires s3 account for jenkins
# sf = SFrame(data='s3://graphlab-testdata/foo.csv')
# print sf.head(sf.num_rows())
pass
def test_creation_from_csv_dir_local(self):
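# read_csv accepts a single file, a whole directory, or a glob pattern over the files in it.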
csv_dir = "./csv_dir"
if os.path.exists(csv_dir):
shutil.rmtree(csv_dir)
os.mkdir(csv_dir)
for i in range(0, 100):
with open(os.path.join(csv_dir, 'foo.%d.csv' % i), 'w') as f:
url = f.name
self.dataframe.to_csv(url, index=False)
f.close()
singleton_sf = SFrame.read_csv(os.path.join(csv_dir, "foo.0.csv"))
self.assertEquals(singleton_sf.num_rows(), 10)
many_sf = SFrame.read_csv(csv_dir)
self.assertEquals(many_sf.num_rows(), 1000)
glob_sf = SFrame.read_csv(os.path.join(csv_dir, "foo.*2.csv"))
self.assertEquals(glob_sf.num_rows(), 100)
with self.assertRaises(RuntimeError):
SFrame.read_csv("missingdirectory")
with self.assertRaises(ValueError):
SFrame.read_csv("")
shutil.rmtree(csv_dir)
def test_creation_from_iterable(self):
# Normal dict of lists
the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}
sf = SFrame(the_dict)
df = pd.DataFrame(the_dict)
self.__test_equal(sf, df)
# Test that a missing value does not change the data type
the_dict['ints'][0] = None
sf = SFrame(the_dict)
self.assertEquals(sf['ints'].dtype(), int)
# numpy.nan is actually a float, so it should cast the column to float
the_dict['ints'][0] = np.nan
sf = SFrame(the_dict)
self.assertEquals(sf['ints'].dtype(), float)
# Just a single list
sf = SFrame(self.int_data)
df = pd.DataFrame(self.int_data)
df.columns = ['X1']
self.__test_equal(sf, df)
# Normal list of lists
list_of_lists = [[1.0,2.0,3.0],[4.0,5.0,6.0],[7.0,8.0,9.0]]
sf = SFrame(list_of_lists)
cntr = 0
for i in sf:
self.assertEquals(list_of_lists[cntr], list(i['X1']))
cntr += 1
self.assertEquals(sf.num_columns(), 1)
the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}
sf = SFrame(the_dict)
sf2 = SFrame({'ints':sf['ints'],'floats':sf['floats'],'strings':sf['strings']})
df = pd.DataFrame(the_dict)
self.__test_equal(sf2, df)
sf2 = SFrame([sf['ints'],sf['floats'],sf['strings']])
self.assertEquals(['X1','X2','X3'],sf2.column_names())
sf2.rename({'X1':'ints','X2':'floats','X3':'strings'})
sf2=sf2[['floats','ints','strings']]
self.__test_equal(sf2, df)
sf = SFrame({'text': ('foo', 'bar', 'biz')})
df = pd.DataFrame({'text': ['foo', 'bar', 'biz']})
self.__test_equal(sf, df)
def test_head_tail(self):
sf = SFrame(data=self.dataframe)
assert_frame_equal(sf.head(4).to_dataframe(), self.dataframe.head(4))
# Cannot test for equality the same way because of dataframe indices
taildf = sf.tail(4)
for i in range(0, 4):
self.assertEqual(taildf['int_data'][i], self.dataframe['int_data'][i+6])
self.assertEqual(taildf['float_data'][i], self.dataframe['float_data'][i+6])
self.assertEqual(taildf['string_data'][i], self.dataframe['string_data'][i+6])
def test_head_tail_edge_case(self):
sf = SFrame()
self.assertEquals(sf.head().num_columns(), 0)
self.assertEquals(sf.tail().num_columns(), 0)
self.assertEquals(sf.head().num_rows(), 0)
self.assertEquals(sf.tail().num_rows(), 0)
sf = SFrame()
sf['a'] = []
self.assertEquals(sf.head().num_columns(), 1)
self.assertEquals(sf.tail().num_columns(), 1)
self.assertEquals(sf.head().num_rows(), 0)
self.assertEquals(sf.tail().num_rows(), 0)
def test_transform(self):
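# apply() with an explicit output type should reproduce each column and
# support row-wise arithmetic across columns.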
sf = SFrame(data=self.dataframe)
for i in range(sf.num_cols()):
colname = sf.column_names()[i]
sa = sf.apply(lambda x: x[colname], sf.column_types()[i])
self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])
sa = sf.apply(lambda x: x['int_data'] + x['float_data'], float)
self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)
def test_transform_with_recursion(self):
sf = SFrame(data={'a':[0,1,2,3,4], 'b':['0','1','2','3','4']})
# this should be equivalent to sf.apply(lambda x: x) since column 'a'
# is equivalent to range(5) and can be used to index back into sf
sa = sf.apply(lambda x: sf[x['a']])
sb = sf.apply(lambda x: x)
self.__assert_sarray_equal(sa, sb)
def test_transform_with_type_inference(self):
sf = SFrame(data=self.dataframe)
for i in range(sf.num_cols()):
colname = sf.column_names()[i]
sa = sf.apply(lambda x: x[colname])
self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])
sa = sf.apply(lambda x: x['int_data'] + x['float_data'])
self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)
# an apply() that returns a list of numeric values should produce a
# vector (array.array) column, not a list column
sa = sf.apply(lambda x: [x['int_data'], x['float_data']])
self.assertEqual(sa.dtype(), array.array)
def test_transform_with_exception(self):
sf = SFrame(data=self.dataframe)
self.assertRaises(KeyError, lambda: sf.apply(lambda x: x['some random key'])) # cannot find the key
self.assertRaises(TypeError, lambda: sf.apply(lambda x: sum(x.values()))) # lambda cannot sum int and str
self.assertRaises(ZeroDivisionError, lambda: sf.apply(lambda x: x['int_data'] / 0)) # divide by 0 error
self.assertRaises(IndexError, lambda: sf.apply(lambda x: list(x.values())[10])) # index out of bound error
def test_empty_transform(self):
sf = SFrame()
b = sf.apply(lambda x:x)
self.assertEquals(len(b.head()), 0)
def test_flatmap(self):
# Correctness of typical usage
n = 10
sf = SFrame({'id': range(n)})
new_sf = sf.flat_map(["id_range"], lambda x: [[str(i)] for i in range(x['id'])])
self.assertEqual(new_sf.column_names(), ["id_range"])
self.assertEqual(new_sf.column_types(), [str])
expected_col = [str(x) for i in range(n) for x in range(i)]
self.assertListEqual(list(new_sf['id_range']), expected_col)
# Empty SFrame, without explicit column types
sf = SFrame()
with self.assertRaises(TypeError):
new_sf = sf.flat_map(['id_range'],
lambda x: [[i] for i in range(x['id'])])
# Empty rows successfully removed
sf = SFrame({'id': range(15)})
new_sf = sf.flat_map(['id'],
lambda x: [[x['id']]] if x['id'] > 8 else [])
self.assertEqual(new_sf.num_rows(), 6)
# If the first ten rows produce no output, type inference fails and an error is raised
with self.assertRaises(TypeError):
new_sf = sf.flat_map(['id'],
lambda x: [[x['id']]] if x['id'] > 9 else [])
def test_select_column(self):
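# Columns can be selected by name, by type, or by a mix of both;
# duplicate and unknown names should raise.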
sf = SFrame(data=self.dataframe)
sub_sf = sf.select_columns(['int_data', 'string_data'])
exp_df = pd.DataFrame({'int_data': self.int_data, 'string_data': self.string_data})
self.__test_equal(sub_sf, exp_df)
with self.assertRaises(ValueError):
sf.select_columns(['int_data', 'string_data', 'int_data'])
# test indexing
sub_col = sf['float_data']
self.assertEqual(list(sub_col.head(10)), self.float_data)
with self.assertRaises(TypeError):
sub_sf = sf.select_columns(['duh',1])
with self.assertRaises(TypeError):
sub_sf = sf.select_columns(0)
with self.assertRaises(RuntimeError):
sub_sf = sf.select_columns(['not_a_column'])
self.assertEqual(sf.select_columns([int]).column_names(), ['int_data'])
self.assertEqual(sf.select_columns([int, str]).column_names(), ['int_data', 'string_data'])
self.assertEqual(sf[int].column_names(), ['int_data'])
self.assertEqual(sf[[int, str]].column_names(), ['int_data', 'string_data'])
self.assertEqual(sf[int, str].column_names(), ['int_data', 'string_data'])
self.assertEqual(sf['int_data', 'string_data'].column_names(), ['int_data', 'string_data'])
self.assertEqual(sf['string_data', 'int_data'].column_names(), ['string_data', 'int_data'])
sf = SFrame()
with self.assertRaises(RuntimeError):
sf.select_column('x')
with self.assertRaises(RuntimeError):
sf.select_columns(['x'])
sf.add_column(SArray(), 'x')
# does not throw
sf.select_column('x')
sf.select_columns(['x'])
with self.assertRaises(RuntimeError):
sf.select_column('y')
with self.assertRaises(RuntimeError):
sf.select_columns(['y'])
def test_topk(self):
sf = SFrame(data=self.dataframe)
# Test that order is preserved
df2 = sf.topk('int_data').to_dataframe()
df2_expected = self.dataframe.sort('int_data', ascending=False)
df2_expected.index = range(df2.shape[0])
assert_frame_equal(df2, df2_expected)
df2 = sf.topk('float_data', 3).to_dataframe()
df2_expected = self.dataframe.sort('float_data', ascending=False).head(3)
df2_expected.index = range(3)
assert_frame_equal(df2, df2_expected)
df2 = sf.topk('string_data', 3).to_dataframe()
for i in range(0, 3):
self.assertEqual(df2['int_data'][2-i], i + 7)
with self.assertRaises(TypeError):
sf.topk(2,3)
sf = SFrame()
sf.add_column(SArray([1,2,3,4,5]), 'a')
sf.add_column(SArray([1,2,3,4,5]), 'b')
sf.topk('a', 1) # should not fail
def test_filter(self):
sf = SFrame(data=self.dataframe)
filter_sa = SArray([1,1,1,0,0,0,0,1,1,1])
sf2 = sf[filter_sa]
exp_df = sf.head(3).append(sf.tail(3))
self.__test_equal(sf2, exp_df.to_dataframe())
# filter by 1s
sf2 = sf[SArray(self.int_data)]
exp_df = sf.head(10).to_dataframe()
self.__test_equal(sf2, exp_df)
# filter by 0s
sf2 = sf[SArray([0,0,0,0,0,0,0,0,0,0])]
exp_df = sf.head(0).to_dataframe()
self.__test_equal(sf2, exp_df)
# wrong size
with self.assertRaises(IndexError):
sf2 = sf[SArray([0,1,205])]
# slightly bigger size
sf = SFrame()
n = 1000000
sf['a'] = range(n)
result = sf[sf['a'] == -1]
self.assertEquals(len(result), 0)
result = sf[sf['a'] > n - 123]
self.assertEquals(len(result), 122)
l = list(result['a'])
for i in range(len(result)):
self.assertEquals(i + n - 122, l[i])
result = sf[sf['a'] < 2000]
self.assertEquals(len(result), 2000)
l = list(result['a'])
for i in range(len(result)):
self.assertEquals(i, l[i])
def test_sample_split(self):
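# sample() with a fixed seed must be deterministic, and random_split()
# must partition rows without overlap.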
sf = SFrame(data=self.__create_test_df(100))
entry_list = set()
for i in sf:
entry_list.add(str(i))
sample_sf = sf.sample(.12, 9)
sample_sf2 = sf.sample(.12, 9)
self.assertEqual(len(sample_sf), len(sample_sf2))
assert_frame_equal(sample_sf.head().to_dataframe(), sample_sf2.head().to_dataframe())
for i in sample_sf:
self.assertTrue(str(i) in entry_list)
with self.assertRaises(ValueError):
sf.sample(3)
sample_sf = SFrame().sample(.12, 9)
self.assertEqual(len(sample_sf), 0)
a_split = sf.random_split(.12, 9)
first_split_entries = set()
for i in a_split[0]:
first_split_entries.add(str(i))
for i in a_split[1]:
self.assertTrue(str(i) in entry_list)
self.assertTrue(str(i) not in first_split_entries)
with self.assertRaises(ValueError):
sf.random_split(3)
self.assertEqual(len(SFrame().random_split(.4)[0]), 0)
self.assertEqual(len(SFrame().random_split(.4)[1]), 0)
# tests add_column, rename
def test_edit_column_ops(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
# Make sure auto names work
names = sf.column_names()
cntr = 1
for i in names:
self.assertEquals("X"+str(cntr), i)
cntr = cntr + 1
# Remove a column
del sf['X2']
# names
names = sf.column_names()
self.assertEquals(len(names), 2)
self.assertEquals('X1', names[0])
self.assertEquals('X3', names[1])
# check content
self.assertEquals(list(sf['X1'].head(10)), self.int_data)
self.assertEquals(list(sf['X3'].head(10)), self.string_data)
# check that a new automatically named column will not conflict
sf.add_column(SArray(self.string_data))
names = sf.column_names()
self.assertEquals(len(names), 3)
uniq_set = set()
for i in names:
uniq_set.add(i)
if len(uniq_set) == 1:
self.assertEquals(list(sf[i].head(10)), self.int_data)
else:
self.assertEquals(list(sf[i].head(10)), self.string_data)
self.assertEquals(len(uniq_set), 3)
# replacing columns preserves order
names = sf.column_names()
for n in names:
sf[n] = sf[n].apply(lambda x: x)
self.assertEquals(sf.column_names(), names)
# do it again!
del sf['X1']
sf.add_column(SArray(self.string_data))
names = sf.column_names()
self.assertEquals(len(names), 3)
uniq_set = set()
for i in names:
uniq_set.add(i)
self.assertEquals(list(sf[i].head(10)), self.string_data)
self.assertEquals(len(uniq_set), len(names))
# standard rename
rename_dict = {'X3':'data','X3.1':'more_data','X3.2':'even_more'}
sf.rename(rename_dict)
self.assertEquals(sf.column_names(), ['data','more_data','even_more'])
# rename a column to a name that's already taken
with self.assertRaises(RuntimeError):
sf.rename({'data':'more_data'})
# try to rename a column that doesn't exist
with self.assertRaises(ValueError):
sf.rename({'foo':'bar'})
# pass something other than a dict
with self.assertRaises(TypeError):
sf.rename('foo')
# Setting a column to const preserves order
names = sf.column_names()
for n in names:
sf[n] = 1
self.assertEquals(sf.column_names(), names)
def test_duplicate_add_column_failure(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data), "hello")
with self.assertRaises(RuntimeError):
sf.add_column(SArray(self.float_data), "hello")
def test_remove_column(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
sf2 = sf.remove_column('X3')
assert sf is sf2
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X4', 'X5'])
sf2 = sf.remove_columns(['X2', 'X5'])
assert sf is sf2
self.assertEquals(sf.column_names(), ['X1', 'X4'])
# with a generator expression
sf2 = sf.remove_columns((n for n in ['X1', 'X5'] if n in sf.column_names()))
assert sf is sf2
self.assertEquals(sf.column_names(), ['X4'])
def test_remove_bad_column(self):
sf = SFrame()
# typical add column stuff
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.int_data))
sf.add_column(SArray(self.float_data))
sf.add_column(SArray(self.string_data))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
self.assertRaises(KeyError, lambda: sf.remove_column('bad'))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
self.assertRaises(KeyError, lambda: sf.remove_columns(['X1', 'X2', 'X3', 'bad', 'X4']))
self.assertEquals(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])
def __generate_synthetic_sframe__(self, num_users):
"""
Synthetic collaborative data:
generate num_users users; user i watched movies 1, ..., i.
rating(i, j) = i + j
length(i, j) = i - j
"""
sf = SFrame()
sparse_matrix = {}
for i in range(1, num_users + 1):
sparse_matrix[i] = [(j, i + j, i - j) for j in range(1, i + 1)]
user_ids = []
movie_ids = []
ratings = []
length_of_watching = []
for u in sparse_matrix:
user_ids += [u] * len(sparse_matrix[u])
movie_ids += [x[0] for x in sparse_matrix[u]]
ratings += [x[1] for x in sparse_matrix[u]]
length_of_watching += [x[2] for x in sparse_matrix[u]]
# typical add column stuff
sf['user_id'] = (SArray(user_ids, int))
sf['movie_id'] = (SArray(movie_ids, str))
sf['rating'] = (SArray(ratings, float))
sf['length'] = (SArray(length_of_watching, int))
return sf
def test_aggregate_ops(self):
"""
Test builtin groupby aggregators
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] \
for y in range(m)]
sf = SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
sf.__materialize__()
built_ins = [aggregate.COUNT(), aggregate.SUM('value'),
aggregate.AVG('value'), aggregate.MIN('value'),
aggregate.MAX('value'), aggregate.VAR('value'),
aggregate.STDV('value'), aggregate.SUM('vector_values'),
aggregate.MEAN('vector_values'),
aggregate.COUNT_DISTINCT('value'),
aggregate.DISTINCT('value'),
aggregate.FREQ_COUNT('value')]
sf2 = sf.groupby('key', built_ins)
self.assertEquals(len(sf2), 1)
self.assertEqual(sf2['Count'][0], m)
self.assertEqual(sf2['Sum of value'][0], sum(values))
self.assertAlmostEqual(sf2['Avg of value'][0], np.mean(values))
self.assertEqual(sf2['Min of value'][0], min(values))
self.assertEqual(sf2['Max of value'][0], max(values))
self.assertAlmostEqual(sf2['Var of value'][0], np.var(values))
self.assertAlmostEqual(sf2['Stdv of value'][0], np.std(values))
np.testing.assert_almost_equal(list(sf2['Vector Sum of vector_values'][0]),
list(np.sum(vector_values, axis=0)))
np.testing.assert_almost_equal(list(sf2['Vector Avg of vector_values'][0]),
list(np.mean(vector_values, axis=0)))
self.assertEqual(sf2['Count Distinct of value'][0],
len(np.unique(values)))
self.assertEqual(sorted(sf2['Distinct of value'][0]),
sorted(list(np.unique(values))))
self.assertEqual(sf2['Frequency Count of value'][0],
{k:1 for k in np.unique(values)})
def test_min_max_with_missing_values(self):
"""
Test builtin groupby aggregators
"""
sf = SFrame()
sf['key'] = [1,1,1,1,1,1,2,2,2,2]
sf['value'] = [1,None,None,None,None,None, None,None,None,None]
built_ins = [aggregate.COUNT(), aggregate.SUM('value'),
aggregate.AVG('value'), aggregate.MIN('value'),
aggregate.MAX('value'), aggregate.VAR('value'),
aggregate.STDV('value'), aggregate.COUNT_DISTINCT('value'),
aggregate.DISTINCT('value'), aggregate.FREQ_COUNT('value')]
sf2 = sf.groupby('key', built_ins).sort('key')
self.assertEqual(list(sf2['Count']), [6,4])
self.assertEqual(list(sf2['Sum of value']), [1, 0])
self.assertEqual(list(sf2['Avg of value']), [1, None])
self.assertEqual(list(sf2['Min of value']), [1, None])
self.assertEqual(list(sf2['Max of value']), [1, None])
self.assertEqual(list(sf2['Var of value']), [0, 0])
self.assertEqual(list(sf2['Stdv of value']), [0, 0])
self.assertEqual(list(sf2['Count Distinct of value']), [2, 1])
self.assertEqual(set(sf2['Distinct of value'][0]), set([1, None]))
self.assertEqual(set(sf2['Distinct of value'][1]), set([None]))
self.assertEqual(sf2['Frequency Count of value'][0], {1:1, None:5})
self.assertEqual(sf2['Frequency Count of value'][1], {None:4})
def test_aggregate_ops_on_lazy_frame(self):
"""
Test builtin groupby aggregators
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] \
for y in range(m)]
sf = SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
sf['value'] = sf['value'] + 0
built_ins = [aggregate.COUNT(), aggregate.SUM('value'),
aggregate.AVG('value'), aggregate.MIN('value'),
aggregate.MAX('value'), aggregate.VAR('value'),
aggregate.STDV('value'), aggregate.SUM('vector_values'),
aggregate.MEAN('vector_values'),
aggregate.COUNT_DISTINCT('value'),
aggregate.DISTINCT('value')]
sf2 = sf.groupby('key', built_ins)
self.assertEquals(len(sf2), 1)
self.assertEqual(sf2['Count'][0], m)
self.assertEqual(sf2['Sum of value'][0], sum(values))
self.assertAlmostEqual(sf2['Avg of value'][0], np.mean(values))
self.assertEqual(sf2['Min of value'][0], min(values))
self.assertEqual(sf2['Max of value'][0], max(values))
self.assertAlmostEqual(sf2['Var of value'][0], np.var(values))
self.assertAlmostEqual(sf2['Stdv of value'][0], np.std(values))
np.testing.assert_almost_equal(list(sf2['Vector Sum of vector_values'][0]),
list(np.sum(vector_values, axis=0)))
np.testing.assert_almost_equal(list(sf2['Vector Avg of vector_values'][0]),
list(np.mean(vector_values, axis=0)))
self.assertEqual(sf2['Count Distinct of value'][0],
len(np.unique(values)))
self.assertEqual(sorted(sf2['Distinct of value'][0]),
sorted(np.unique(values)))
def test_aggregate_ops2(self):
"""
Test builtin groupby aggregators using explicit named columns
"""
for m in [1, 10, 20, 50, 100]:
values = range(m)
vector_values = [[random.randint(1,100) for num in range(10)] \
for y in range(m)]
sf = SFrame()
sf['key'] = [1] * m
sf['value'] = values
sf['vector_values'] = vector_values
built_ins = {'count':aggregate.COUNT,
'sum':aggregate.SUM('value'),
'avg':aggregate.AVG('value'),
'avg2':aggregate.MEAN('value'),
'min':aggregate.MIN('value'),
'max':aggregate.MAX('value'),
'var':aggregate.VAR('value'),
'var2':aggregate.VARIANCE('value'),
'stdv':aggregate.STD('value'),
'stdv2':aggregate.STDV('value'),
'vector_sum': aggregate.SUM('vector_values'),
'vector_mean': aggregate.MEAN('vector_values'),
'count_unique':aggregate.COUNT_DISTINCT('value'),
'unique':aggregate.DISTINCT('value'),
'frequency':aggregate.FREQ_COUNT('value')}
sf2 = sf.groupby('key', built_ins)
self.assertEquals(len(sf2), 1)
self.assertEqual(sf2['count'][0], m)
self.assertEqual(sf2['sum'][0], sum(values))
self.assertAlmostEqual(sf2['avg'][0], np.mean(values))
self.assertAlmostEqual(sf2['avg2'][0], np.mean(values))
self.assertEqual(sf2['min'][0], min(values))
self.assertEqual(sf2['max'][0], max(values))
self.assertAlmostEqual(sf2['var'][0], np.var(values))
self.assertAlmostEqual(sf2['var2'][0], np.var(values))
self.assertAlmostEqual(sf2['stdv'][0], np.std(values))
self.assertAlmostEqual(sf2['stdv2'][0], np.std(values))
np.testing.assert_almost_equal(sf2['vector_sum'][0], list(np.sum(vector_values, axis=0)))
np.testing.assert_almost_equal(sf2['vector_mean'][0], list(np.mean(vector_values, axis=0)))
self.assertEqual(sf2['count_unique'][0], len(np.unique(values)))
self.assertEqual(sorted(sf2['unique'][0]),
sorted(np.unique(values)))
self.assertEqual(sf2['frequency'][0],
{k:1 for k in np.unique(values)})
def test_groupby(self):
"""
Test builtin groupby and aggregate on different column types
"""
num_users = 500
sf = self.__generate_synthetic_sframe__(num_users=num_users)
built_ins = [aggregate.COUNT(), aggregate.SUM('rating'),
aggregate.AVG('rating'), aggregate.MIN('rating'),
aggregate.MAX('rating'), aggregate.VAR('rating'),
aggregate.STDV('rating')]
built_in_names = ['Sum', 'Avg', 'Min', 'Max', 'Var', 'Stdv']
"""
Test groupby user_id and aggregate on rating
"""
sf_user_rating = sf.groupby('user_id', built_ins)
actual = sf_user_rating.column_names()
expected = ['%s of rating' % v for v in built_in_names] \
+ ['user_id'] + ['Count']
self.assertSetEqual(set(actual), set(expected))
for row in sf_user_rating:
uid = row['user_id']
mids = range(1, uid + 1)
ratings = [uid + i for i in mids]
expected = [len(ratings), sum(ratings), np.mean(ratings),
min(ratings), max(ratings), np.var(ratings),
np.sqrt(np.var(ratings))]
actual = [row['Count']] + [row['%s of rating' % op] \
for op in built_in_names]
for i in range(len(actual)):
self.assertAlmostEqual(actual[i], expected[i])
"""
Test that count can be applied on empty aggregate column.
"""
sf_user_rating = sf.groupby("user_id", {'counter': aggregate.COUNT()})
actual = {x['user_id']: x['counter'] for x in sf_user_rating}
expected = {i: i for i in range(1, num_users + 1)}
self.assertDictEqual(actual, expected)
"""
Test groupby movie_id and aggregate on length_of_watching
"""
built_ins = [aggregate.COUNT(), aggregate.SUM('length'),
aggregate.AVG('length'), aggregate.MIN('length'),
aggregate.MAX('length'), aggregate.VAR('length'),
aggregate.STDV('length')]
sf_movie_length = sf.groupby('movie_id', built_ins)
actual = sf_movie_length.column_names()
expected = ['%s of length' % v for v in built_in_names] \
+ ['movie_id'] + ['Count']
self.assertSetEqual(set(actual), set(expected))
for row in sf_movie_length:
mid = row['movie_id']
uids = range(int(mid), num_users + 1)
values = [i - int(mid) for i in uids]
expected = [len(values), sum(values), np.mean(values), min(values),
max(values), np.var(values), np.std(values)]
actual = [row['Count']] + [row['%s of length' % op] \
for op in built_in_names]
for i in range(len(actual)):
self.assertAlmostEqual(actual[i], expected[i])
def test_quantile_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
# max and min rating for each user
g = sf.groupby('user_id', [aggregate.MIN('rating'),
aggregate.MAX('rating'),
aggregate.QUANTILE('rating', 0, 1)])
self.assertEquals(len(g), 500)
for row in g:
minrating = row['Min of rating']
maxrating = row['Max of rating']
arr = list(row['Quantiles of rating'])
self.assertEquals(len(arr), 2)
self.assertEquals(arr[0], minrating)
self.assertEquals(arr[1], maxrating)
def test_argmax_argmin_groupby(self):
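# ARGMAX/ARGMIN should return the movie_id of the highest/lowest rating within each user's group.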
sf = self.__generate_synthetic_sframe__(num_users=500)
sf_ret = sf.groupby('user_id',
{'movie with max rating' : aggregate.ARGMAX('rating','movie_id'),
'movie with min rating' : aggregate.ARGMIN('rating','movie_id')})
self.assertEquals(len(sf_ret), 500)
self.assertEqual(sf_ret["movie with max rating"].dtype(), str)
self.assertEqual(sf_ret["movie with min rating"].dtype(), str)
self.assertEqual(sf_ret["user_id"].dtype(), int)
# make sure we have computed correctly.
max_d = {}
min_d = {}
for i in sf:
key = i['user_id']
if key not in max_d:
max_d[key] = (i['movie_id'],i['rating'])
min_d[key] = (i['movie_id'],i['rating'])
else:
if max_d[key][1] < i['rating']:
max_d[key] = (i['movie_id'],i['rating'])
if min_d[key][1] > i['rating']:
min_d[key] = (i['movie_id'],i['rating'])
for i in sf_ret:
key = i['user_id']
self.assertEqual(i["movie with max rating"],max_d[key][0])
self.assertEqual(i["movie with min rating"],min_d[key][0])
def test_multicolumn_groupby(self):
sf = self.__generate_synthetic_sframe__(num_users=500)
sf_um = sf.groupby(["user_id", "movie_id"], aggregate.COUNT)
# I can query it
t = sf_um.to_dataframe()
self.assertEqual(sf_um["user_id"].dtype(), int)
self.assertEqual(sf_um["movie_id"].dtype(), str)
# make sure we have counted correctly
d = {}
for i in sf:
key = str(i['user_id']) + "," + i["movie_id"]
if key not in d:
d[key] = 0
d[key] = d[key] + 1
for i in sf_um:
key = str(i['user_id']) + "," + i["movie_id"]
self.assertTrue(key in d)
self.assertEqual(i['Count'], d[key])
sf_um = sf.groupby(["movie_id", "user_id"], aggregate.COUNT())
# I can query it
t = sf_um.to_dataframe()
self.assertEqual(sf_um["user_id"].dtype(), int)
self.assertEqual(sf_um["movie_id"].dtype(), str)
# make sure we have counted correctly
d = {}
for i in sf:
key = str(i['user_id']) + "," + i["movie_id"]
if key not in d:
d[key] = 0
d[key] = d[key] + 1
for i in sf_um:
key = str(i['user_id']) + "," + i["movie_id"]
self.assertTrue(key in d)
self.assertEqual(i['Count'], d[key])
def __assert_concat_result_equal(self, result, expected, list_columns):
self.assertEqual(result.num_columns(), expected.num_columns())
for column in result.column_names():
c1 = result[column]
c2 = expected[column]
self.assertEqual(c1.dtype(), c2.dtype())
self.assertEqual(c1.size(), c2.size())
if (column in list_columns):
for i in range(len(c1)):
if (c1[i] == None):
self.assertTrue(c2[i] == None)
continue
if (c1.dtype() == dict):
for k in c1[i]:
self.assertEqual(c2[i][k], c1[i][k])
else:
s1 = list(c1[i])
if s1 is not None: s1.sort()
s2 = list(c2[i])
if s2 is not None: s2.sort()
self.assertEqual(s1, s2)
else:
self.assertEqual(list(c1),list(c2))
def test_groupby_dict_key(self):
t = SFrame({'a':[{1:2},{3:4}]})
with self.assertRaises(TypeError):
t.groupby('a', {})
def test_concat(self):
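# CONCAT collects group values into a list (one column) or a dict (two columns);
# missing list values and missing dict keys are dropped.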
sf = SFrame()
sf['a'] = [1,1,1,1, 2,2,2, 3, 4,4, 5]
sf['b'] = [1,2,1,2, 3,3,1, 4, None, 2, None]
sf['c'] = ['a','b','a','b', 'e','e', None, 'h', 'i','j', 'k']
sf['d'] = [1.0,2.0,1.0,2.0, 3.0,3.0,1.0, 4.0, None, 2.0, None]
sf['e'] = [{'x': 1}] * len(sf['a'])
print(sf['b'].dtype())
result = sf.groupby('a', aggregate.CONCAT('b'))
expected_result = SFrame({
'a': [1,2,3,4, 5],
'List of b': [[1.,1.,2.,2.],[1.,3.,3.],[4.],[2.], []]
})
expected_result['List of b'] = expected_result['List of b'].astype(list)
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of b'])
result = sf.groupby('a', aggregate.CONCAT('d'))
expected_result = SFrame({
'a': [1,2,3,4, 5],
'List of d': [[1,1,2,2],[1,3,3],[4],[2], []]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of d'])
result = sf.groupby('a', {'c_c' :aggregate.CONCAT('c')})
expected_result = SFrame({
'a': [1,2,3,4, 5],
'c_c': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_c'])
result = sf.groupby('a', aggregate.CONCAT('b','c'))
expected_result = SFrame({
'a': [1,2,3,4,5],
'Dict of b_c': [{1:'a',2:'b'},{3:'e', 1: None},{4:'h'},{2:'j'}, {}]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['Dict of b_c'])
result = sf.groupby('a', {'c_b':aggregate.CONCAT('c','b')})
expected_result = SFrame({
'a': [1,2,3,4,5],
'c_b': [{'a':1, 'b':2},{'e':3},{'h':4},{'i':None, 'j':2},{'k':None}]
})
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_b'])
result = sf.groupby('a', {'cs':aggregate.CONCAT('c'), 'bs':aggregate.CONCAT('b')})
expected_result = SFrame({
'a': [1,2,3,4,5],
'bs': [[1,1,2,2],[1,3,3],[4],[2], []],
'cs': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]
})
expected_result['bs'] = expected_result['bs'].astype(list)
self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['bs','cs'])
# exceptions: missing column argument, nonexistent column, or invalid value column
with self.assertRaises(TypeError):
sf.groupby('a', aggregate.CONCAT())
with self.assertRaises(KeyError):
sf.groupby('a', aggregate.CONCAT('nonexist'))
with self.assertRaises(TypeError):
sf.groupby('a', aggregate.CONCAT('e', 'a'))
def test_select_one(self):
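# SELECT_ONE returns an arbitrary 'b' value from each group, so it must be
# one of that group's two candidates.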
sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})
res = list(sf.groupby('a', {'b':aggregate.SELECT_ONE('b')}))
self.assertEqual(len(res), 5)
for i in res:
self.assertTrue(i['b'] == 2 * i['a'] or i['b'] == 2 * i['a'] - 1)
def test_unique(self):
sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})
self.assertEqual(len(sf.unique()), 10)
vals = [1,1,2,2,3,3,4,4, None, None]
sf = SFrame({'a':vals,'b':vals})
res = sf.unique()
self.assertEqual(len(res), 5)
self.assertEqual(set(res['a']), set([1,2,3,4,None]))
self.assertEqual(set(res['b']), set([1,2,3,4,None]))
def test_append_empty(self):
sf_with_data = SFrame(data=self.dataframe)
empty_sf = SFrame()
self.assertFalse(sf_with_data.append(empty_sf) is sf_with_data)
self.assertFalse(empty_sf.append(sf_with_data) is sf_with_data)
self.assertFalse(empty_sf.append(empty_sf) is empty_sf)
def test_append_all_match(self):
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
new_sf = sf1.append(sf2)
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
def test_append_lazy(self):
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
new_sf = sf1.append(sf2)
self.assertTrue(new_sf.__is_materialized__())
filter_sf1 = SArray([1 for i in range(sf1.num_rows())] + [0 for i in range(sf2.num_rows())])
filter_sf2 = SArray([0 for i in range(sf1.num_rows())] + [1 for i in range(sf2.num_rows())])
new_sf1 = new_sf[filter_sf1]
new_sf2 = new_sf[filter_sf2]
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
assert_frame_equal(sf1.to_dataframe(), new_sf1.to_dataframe())
assert_frame_equal(sf2.to_dataframe(), new_sf2.to_dataframe())
row = sf1.head(1)
sf = SFrame()
for i in range(10):
sf = sf.append(row)
df = sf.to_dataframe()
for i in range(10):
self.assertEqual(list(df.iloc[[i]]), list(sf.head(1).to_dataframe().iloc[[0]]))
def test_recursive_append(self):
sf = SFrame()
for i in range(200):
sf = sf.append(SFrame(data = self.dataframe))
#consume
sf.__materialize__()
def test_print_sframe(self):
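# Printing should not raise for any supported column type, including unicode,
# datetime, image, and malformed-string columns.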
sf = SFrame()
def _test_print():
sf.__repr__()
sf._repr_html_()
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
output = StringIO()
sf.print_rows(output_file=output)
n = 20
sf['int'] = [i for i in range(n)]
sf['float'] = [float(i) for i in range(n)]
sf['str'] = [str(i) for i in range(n)]
uc = '\xe5\xa4\xa7\xe5\xa4\xb4' # dato pronounced in chinese, big head
sf['unicode'] = [uc for i in range(n)]
sf['array'] = [array.array('d', [i]) for i in range(n)]
sf['list'] = [[i, float(i), [i]] for i in range(n)]
utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')
sf['dt'] = [utc for i in range(n)]
sf['img'] = [Image() for i in range(n)]
sf['long_str'] = ["".join([str(i)] * 50) for i in range(n)]
sf['long_unicode'] = ["".join([uc] * 50) for i in range(n)]
sf['bad_unicode'] = ['\x9d' + uc for i in range(n)]
_test_print()
def test_print_lazy_sframe(self):
sf1 = SFrame(data=self.dataframe)
self.assertTrue(sf1.__is_materialized__())
sf2 = sf1[sf1['int_data'] > 3]
sf2.__repr__()
sf2.__str__()
self.assertFalse(sf2.__is_materialized__())
len(sf2)
self.assertTrue(sf2.__is_materialized__())
def test_append_order_diff(self):
# name match but column order not match
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
sf2.swap_columns('int_data', 'string_data')
new_sf = sf1.append(sf2)
assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())
def test_append_empty_sframe(self):
sf = SFrame(data=self.dataframe)
other = SFrame()
# non empty append empty
assert_frame_equal(sf.append(other).to_dataframe(), self.dataframe)
# empty append non empty
assert_frame_equal(other.append(sf).to_dataframe(), self.dataframe)
#empty append empty
assert_frame_equal(other.append(other).to_dataframe(), pd.DataFrame())
def test_append_exception(self):
sf = SFrame(data=self.dataframe)
# column number not match
other = SFrame()
other.add_column(SArray(), "test")
self.assertRaises(RuntimeError, lambda: sf.append(other)) # column not the same
# column name not match
other = SFrame()
names = sf.column_names()
for name in sf.column_names():
other.add_column(SArray(), name)
names[0] = 'some name not match'
self.assertRaises(RuntimeError, lambda: sf.append(other))
# name match but column type order not match
sf1 = SFrame(data=self.dataframe)
sf2 = SFrame(data=self.dataframe2)
#change one column type
sf1["int_data"] = sf2.select_column("int_data").astype(float)
self.assertRaises(RuntimeError, lambda: sf1.append(sf2))
def test_simple_joins(self):
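# Exercise inner, left, right, and outer joins, plus joins on non-matching and non-unique keys.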
inner_expected = SFrame()
inner_expected.add_column(SArray(['Robinson','Jones','Smith','Heisenberg','Rafferty']), 'last_name')
inner_expected.add_column(SArray([34,33,34,33,31]), 'dep_id')
inner_expected.add_column(SArray(['Clerical','Engineering','Clerical','Engineering','Sales']), 'dep_name')
# Tests the "natural join" case
beg = time.time()
res = self.employees_sf.join(self.departments_sf)
end = time.time()
print("Really small join: " + str(end-beg) + " s")
self.__assert_join_results_equal(res, inner_expected)
left_join_row = SFrame()
left_join_row.add_column(SArray(['John']), 'last_name')
left_join_row.add_column(SArray([None], int), 'dep_id')
left_join_row.add_column(SArray([None], str), 'dep_name')
left_expected = inner_expected.append(left_join_row)
# Left outer join, passing string to 'on'
res = self.employees_sf.join(self.departments_sf, how='left', on='dep_id')
self.__assert_join_results_equal(res, left_expected)
right_join_row = SFrame()
right_join_row.add_column(SArray([None], str), 'last_name')
right_join_row.add_column(SArray([35]), 'dep_id')
right_join_row.add_column(SArray(['Marketing']), 'dep_name')
right_expected = inner_expected.append(right_join_row)
# Right outer join, passing list to 'on'
res = self.employees_sf.join(self.departments_sf, how='right', on=['dep_id'])
self.__assert_join_results_equal(res, right_expected)
outer_expected = left_expected.append(right_join_row)
# Full outer join, passing dict to 'on'
res = self.employees_sf.join(self.departments_sf, how='outer', on={'dep_id':'dep_id'})
self.__assert_join_results_equal(res, outer_expected)
# Test a join on non-matching key
res = self.employees_sf.join(self.departments_sf, on={'last_name':'dep_name'})
self.assertEquals(res.num_rows(), 0)
self.assertEquals(res.num_cols(), 3)
self.assertEquals(res.column_names(), ['last_name', 'dep_id', 'dep_id.1'])
# Test a join on a non-unique key
bad_departments = SFrame()
bad_departments['dep_id'] = SArray([33,33,31,31])
bad_departments['dep_name'] = self.departments_sf['dep_name']
no_pk_expected = SFrame()
no_pk_expected['last_name'] = SArray(['Rafferty','Rafferty','Heisenberg','Jones','Heisenberg','Jones'])
no_pk_expected['dep_id'] = SArray([31,31,33,33,33,33])
no_pk_expected['dep_name'] = SArray(['Clerical','Marketing','Sales','Sales','Engineering','Engineering'])
res = self.employees_sf.join(bad_departments, on='dep_id')
self.__assert_join_results_equal(res, no_pk_expected)
# Left join on non-unique key
bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])
bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])
no_pk_expected = no_pk_expected.append(right_join_row)
no_pk_expected = no_pk_expected.append(right_join_row)
no_pk_expected = no_pk_expected[['dep_id', 'dep_name', 'last_name']]
res = bad_departments.join(self.employees_sf, on='dep_id', how='left')
self.__assert_join_results_equal(res, no_pk_expected)
def test_big_composite_join(self):
# Create a semi large SFrame with composite primary key (letter, number)
letter_keys = []
number_keys = []
data = []
for i in string.ascii_lowercase:
for j in range(0,100):
letter_keys.append(i)
number_keys.append(j)
which = j % 3
if which == 0:
data.append(string.ascii_uppercase)
elif which == 1:
data.append(string.digits)
elif which == 2:
data.append(string.hexdigits)
pk_gibberish = SFrame()
pk_gibberish['letter'] = SArray(letter_keys, str)
pk_gibberish['number'] = SArray(number_keys, int)
pk_gibberish['data'] = SArray(data, str)
# Some rows that won't match
more_data = []
more_letter_keys = []
more_number_keys = []
for i in range(0,40000):
more_data.append('fish')
more_letter_keys.append('A')
more_number_keys.append(200)
for i in range(0,80):
for j in range(100,1000):
more_data.append('waffles')
more_letter_keys.append(letter_keys[j])
more_number_keys.append(number_keys[j])
# Non-matching row in this stretch
if j == 147:
more_letter_keys[-1] = 'A'
for i in range(0,5000):
more_data.append('pizza')
more_letter_keys.append('Z')
more_number_keys.append(400)
join_with_gibberish = SFrame()
join_with_gibberish['data'] = SArray(more_data, str)
join_with_gibberish['moredata'] = SArray(more_data, str)
join_with_gibberish['a_number'] = SArray(more_number_keys, int)
join_with_gibberish['a_letter'] = SArray(more_letter_keys, str)
expected_answer = SFrame()
exp_letter = []
exp_number = []
exp_data = []
for i in range(0,80):
exp_letter.extend(letter_keys[100:147])
exp_number.extend(number_keys[100:147])
exp_letter.extend(letter_keys[148:1000])
exp_number.extend(number_keys[148:1000])
exp_data.extend(data[100:147])
exp_data.extend(data[148:1000])
expected_answer['letter'] = SArray(exp_letter, str)
expected_answer['number'] = SArray(exp_number, int)
expected_answer['data'] = SArray(exp_data, str)
expected_answer['data.1'] = 'waffles'
expected_answer['moredata'] = 'waffles'
beg = time.time()
res = pk_gibberish.join(join_with_gibberish, on={'letter':'a_letter','number':'a_number'})
end = time.time()
print("Join took " + str(end-beg) + " seconds")
self.__assert_join_results_equal(res, expected_answer)
def test_convert_dataframe_empty(self):
sf = SFrame()
sf['a'] = SArray([], int)
df = sf.to_dataframe()
self.assertEqual(df['a'].dtype, int)
sf1 = SFrame(df)
self.assertEquals(sf1['a'].dtype(), int)
self.assertEqual(sf1.num_rows(), 0)
def test_replace_one_column(self):
sf = SFrame()
sf['a'] = [1,2,3]
self.assertEquals(list(sf['a']), [1,2,3])
# this should succeed since 'a' is the only column, so its length is allowed to change
sf['a'] = [1,2]
self.assertEquals(list(sf['a']), [1,2])
# a failed replacement should leave the original column unchanged
with self.assertRaises(TypeError):
sf['a'] = [1,2,'a']
self.assertEquals(list(sf['a']), [1,2])
# replacing a column with one of a different length should fail when there is more than one column
sf = SFrame()
sf['a'] = [1,2,3]
sf['b'] = ['a', 'b', 'c']
with self.assertRaises(RuntimeError):
sf['a'] = [1,2]
def test_filter_by(self):
# Set up SFrame to filter by
sf = SFrame()
sf.add_column(SArray(self.int_data), "ints")
sf.add_column(SArray(self.float_data), "floats")
sf.add_column(SArray(self.string_data), "strings")
# Normal cases
res = sf.filter_by(SArray(self.int_data), "ints")
self.__assert_join_results_equal(res, sf)
res = sf.filter_by(SArray(self.int_data), "ints", exclude=True)
self.assertEquals(list(res), [])
res = sf.filter_by([5,6], "ints")
exp = SFrame()
exp.add_column(SArray(self.int_data[4:6]), "ints")
exp.add_column(SArray(self.float_data[4:6]), "floats")
exp.add_column(SArray(self.string_data[4:6]), "strings")
self.__assert_join_results_equal(res, exp)
exp_opposite = SFrame()
exp_opposite.add_column(SArray(self.int_data[:4]+self.int_data[6:]), "ints")
exp_opposite.add_column(SArray(self.float_data[:4]+self.float_data[6:]), "floats")
exp_opposite.add_column(SArray(self.string_data[:4]+self.string_data[6:]), "strings")
res = sf.filter_by([5,6], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
exp_one = SFrame()
exp_one.add_column(SArray(self.int_data[4:5]), "ints")
exp_one.add_column(SArray(self.float_data[4:5]), "floats")
exp_one.add_column(SArray(self.string_data[4:5]), "strings")
exp_all_but_one = SFrame()
exp_all_but_one.add_column(SArray(self.int_data[:4]+self.int_data[5:]), "ints")
exp_all_but_one.add_column(SArray(self.float_data[:4]+self.float_data[5:]), "floats")
exp_all_but_one.add_column(SArray(self.string_data[:4]+self.string_data[5:]), "strings")
res = sf.filter_by(5, "ints")
self.__assert_join_results_equal(res, exp_one)
res = sf.filter_by(5, "ints", exclude=True)
self.__assert_join_results_equal(res, exp_all_but_one)
res = sf.filter_by("5", "strings")
self.__assert_join_results_equal(res, exp_one)
res = sf.filter_by(5, "ints", exclude=True)
self.__assert_join_results_equal(res, exp_all_but_one)
# Filter by values that do not appear in the column
res = sf.filter_by([77,77,88,88], "ints")
# Test against empty SFrame with correct columns/types
self.__assert_join_results_equal(res, exp_one[exp_one['ints'] == 9000])
res = sf.filter_by([77,77,88,88], "ints", exclude=True)
self.__assert_join_results_equal(res, sf)
# Duplicate values
res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], "ints")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
# Duplicate and missing
res = sf.filter_by([11,12,46,6,6,55,5,5], "ints")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([11,12,46,6,6,55,5,5], "ints", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
# Type mismatch
with self.assertRaises(TypeError):
res = sf.filter_by(["hi"], "ints")
# Column doesn't exist
with self.assertRaises(KeyError):
res = sf.filter_by([1,2], "intssss")
# Something that can't be turned into an SArray
with self.assertRaises(Exception):
res = sf.filter_by({1:2,3:4}, "ints")
# column_name not given as string
with self.assertRaises(TypeError):
res = sf.filter_by(1,2)
# Duplicate column names after join. Should be last because of the
# renames.
sf.rename({'ints':'id','floats':'id1','strings':'id11'})
exp.rename({'ints':'id','floats':'id1','strings':'id11'})
exp_opposite.rename({'ints':'id','floats':'id1','strings':'id11'})
res = sf.filter_by([5,6], "id")
self.__assert_join_results_equal(res, exp)
res = sf.filter_by([5,6], "id", exclude=True)
self.__assert_join_results_equal(res, exp_opposite)
# XXXXXX: should be inner function
def __test_to_from_dataframe(self, data, type):
sf = SFrame()
sf['a'] = data
df = sf.to_dataframe()
sf1 = SFrame(df)
self.assertTrue(sf1.dtype()[0]== type)
df = pd.DataFrame({'val': data})
sf1 = SFrame(df)
self.assertTrue(sf1.dtype()[0]== type)
def test_to_from_dataframe(self):
self.__test_to_from_dataframe([1,2,3], int)
self.__test_to_from_dataframe(['a', 'b', 'c'], str)
self.__test_to_from_dataframe([1.0, 2.0, 3.0], float)
self.__test_to_from_dataframe([[1, 'b', {'a': 1}], [1,2,3]], list)
self.__test_to_from_dataframe([{'a':1, 1:None}, {'b':2}], dict)
self.__test_to_from_dataframe([[1,2],[1,2],[]], array.array)
def test_pack_columns_exception(self):
sf = SFrame()
sf['a'] = [1, 2, 3, None, None]
sf['b'] = [None, '2', '3', None, '5']
sf['c'] = [None, 2.0, 3.0, None, 5.0]
# cannot pack non-numeric values into an array
with self.assertRaises(TypeError):
sf.pack_columns(dtype=array.array)
# cannot give a non-numeric na value for an array
with self.assertRaises(ValueError):
sf.pack_columns(dtype=array.array, fill_na='c')
# cannot pack nonexistent columns
with self.assertRaises(ValueError):
sf.pack_columns(['d','a'])
# dtype has to be dict/array/list
with self.assertRaises(ValueError):
sf.pack_columns(dtype=str)
# pack duplicate columns
with self.assertRaises(ValueError):
sf.pack_columns(['a','a'])
# packing a subset of columns to array should fail if any column is not numeric
with self.assertRaises(TypeError):
sf.pack_columns(['a','b'], dtype=array.array)
with self.assertRaises(TypeError):
sf.pack_columns(column_prefix = 1)
with self.assertRaises(ValueError):
sf.pack_columns(column_prefix = '1')
with self.assertRaises(ValueError):
sf.pack_columns(column_prefix = 'c', columns=['a', 'b'])
def test_pack_columns2(self):
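# pack_columns with column_prefix packs only the 'category.*' columns, optionally
# keeping the prefix, renaming the result, or filling missing values.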
sf = SFrame()
sf['id'] = [1, 2, 3, 4]
sf['category.a'] = [None, '2', '3', None]
sf['category.b'] = [None, 2.0, None, 4.0]
expected = SArray([
[None, None],
['2', 2.0],
['3', None],
[None, 4.0]])
result = sf.pack_columns(column_prefix='category')
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['id'], sf['id'])
self.__assert_sarray_equal(result['category'], expected)
result = sf.pack_columns(column_prefix='category', new_column_name="new name")
self.assertEqual(result.column_names(), ['id', 'new name'])
self.__assert_sarray_equal(result['id'], sf['id'])
self.__assert_sarray_equal(result['new name'], expected)
# default dtype is list
result = sf.pack_columns(column_prefix='category', dtype=list)
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['category'], expected)
# remove prefix == True by default
expected = SArray([
{},
{'a':'2', 'b':2.0},
{'a':'3'},
{'b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict)
self.__assert_sarray_equal(result['category'], expected)
# remove prefix == False
expected = SArray([
{},
{'category.a':'2', 'category.b':2.0},
{'category.a':'3'},
{'category.b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict, remove_prefix=False)
self.assertEqual(result.column_names(), ['id', 'category'])
self.__assert_sarray_equal(result['category'], expected)
# fill_na
expected = SArray([
{'a':1, 'b':1},
{'a':'2', 'b':2.0},
{'a':'3', 'b':1},
{'a':1, 'b':4.0}
])
result = sf.pack_columns(column_prefix='category', dtype=dict, fill_na = 1)
self.__assert_sarray_equal(result['category'], expected)
expected = SArray([
[1],
[2],
[3],
[4]], list)
result = sf.pack_columns(['id'], new_column_name='id')
self.assertEqual(sorted(result.column_names()), sorted(['id', 'category.a', 'category.b']))
self.__assert_sarray_equal(result['id'], expected)
def test_pack_columns(self):
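# Pack all or a subset of columns into list, array, or dict columns, with and without fill_na.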
sf = SFrame()
sf['id'] = [1, 2, 3, 4, 5]
sf['b'] = [None, '2', '3', None, '5']
sf['c'] = [None, 2.0, 3.0, None, 5.0]
expected_all_default = SArray([
[1, None, None],
[2, '2', 2.0],
[3, '3', 3.0],
[4, None, None],
[5, '5', 5.0]
])
# pack all columns, all default values
self.__assert_sarray_equal(sf.pack_columns()['X1'], expected_all_default)
expected_ab_default = SArray([
[1, None],
[2, '2'],
[3, '3'],
[4, None],
[5, '5']
])
expected_all_fillna_1 = SArray([
[1, -1, -1],
[2, '2', 2.0],
[3, '3', 3.0],
[4, -1, -1],
[5, '5', 5.0]
])
# pack all columns, keeping missing values but filling them with a given value
result = sf.pack_columns(fill_na=-1)
self.assertEqual(result.column_names(), ['X1'])
self.__assert_sarray_equal(result['X1'], expected_all_fillna_1)
# pack partial columns, all default value
result = sf.pack_columns(['id','b'])
self.assertEqual(result.column_names(), ['c','X2'])
self.__assert_sarray_equal(result['c'], sf['c'])
self.__assert_sarray_equal(result['X2'], expected_ab_default)
expected_sarray_ac_fillna_default = SArray([
[1, float('NaN')],
[2, 2.0],
[3, 3.0],
[4, float('NaN')],
[5, 5.0]
])
result = sf.pack_columns(['id','c'], dtype=array.array)
self.assertEqual(result.column_names(), ['b', 'X2'])
self.__assert_sarray_equal(result['b'], sf['b'])
self.__assert_sarray_equal(result['X2'], expected_sarray_ac_fillna_default)
expected_dict_default = SArray([
{'id': 1},
{'id': 2, 'b':'2', 'c': 2.0},
{'id': 3, 'b':'3', 'c': 3.0},
{'id':4 },
{'id':5, 'b':'5', 'c': 5.0}
])
result = sf.pack_columns(dtype=dict)
self.__assert_sarray_equal(result['X1'], expected_dict_default)
expected_dict_fillna = SArray([
{'id': 1, 'b':-1, 'c': -1},
{'id': 2, 'b':'2', 'c': 2.0},
{'id': 3, 'b':'3', 'c': 3.0},
{'id': 4, 'b':-1, 'c': -1},
{'id': 5, 'b':'5', 'c': 5.0}
])
result = sf.pack_columns(dtype=dict, fill_na=-1)
self.__assert_sarray_equal(result['X1'], expected_dict_fillna)
# pack large number of rows
sf = SFrame()
num_rows = 100000
sf['a'] = range(0, num_rows)
sf['b'] = range(0, num_rows)
result = sf.pack_columns(['a', 'b'])
self.assertEqual(len(result), num_rows)
def test_pack_columns_dtype(self):
a = SFrame({'name':[-140500967,-1405039672],'data':[3,4]})
b = a.pack_columns(['name','data'],dtype=array.array)
expected = SArray([[-140500967, 3],[-1405039672,4]])
self.__assert_sarray_equal(b['X1'], expected)
def test_unpack_dict_mixtype(self):
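# When dict values have mixed types, unpack should fall back to a common
# column type (list, array, str, or float).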
sf = SFrame({'a':[{'a':["haha", "hoho"]}, {'a':array.array('d', [1,2,3])}]})
sf = sf.unpack('a', column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), list)
sf = SFrame({'a':[{'a':["haha", "hoho"]}, {'a':None}]})
sf = sf.unpack('a', column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), list)
sa = SArray([{'a':array.array('d', [1,2,3])}, {'a':None}])
sf = sa.unpack(column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), array.array)
sa = SArray([{'a':array.array('d', [1,2,3])}, {'a':{'b':1}}])
sf = sa.unpack(column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), str)
sa = SArray([{'a': 1, 'b': 0.1}, {'a': 0.1, 'b': 1}])
sf = sa.unpack(column_name_prefix = '')
self.assertEqual(sf['a'].dtype(), float)
self.assertEqual(sf['b'].dtype(), float)
def test_unpack_list(self):
sa = SArray([
[1, None, None],
[2, '2', 2.0],
[3, '3', 3.0],
[4, None, None],
[5, '5', 5.0]
])
expected = SFrame()
expected ['a'] = [1, 2, 3, 4, 5]
expected ['b'] = [None, '2', '3', None, '5']
expected ['c'] = [None, 2.0, 3.0, None, 5.0]
result = sa.unpack();
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(column_name_prefix='ttt');
self.assertEqual(result.column_names(), ['ttt.0', 'ttt.1', 'ttt.2'])
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column types
result = sa.unpack(column_types=[int, str, float]);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# more column types
result = sa.unpack(column_types=[int, str, float, int]);
result.rename(dict(zip(result.column_names(), ['a','b','c','d'])))
e = expected.select_columns(['a','b','c'])
e.add_column(SArray([None for i in range(5)], int),'d')
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fewer column types
result = sa.unpack(column_types=[int, str]);
result.rename(dict(zip(result.column_names(), ['a','b'])))
e = expected.select_columns(['a','b'])
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fill na_value
e = SFrame()
e['a'] = [1, 2, None, 4, 5]
e['b'] = [None, '2', '3', None, '5']
e['c'] = [None, 2.0, None, None, 5.0]
result = sa.unpack(na_value=3);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# column_name_prefix must be a string, not a list
with self.assertRaises(TypeError):
sa.unpack(column_name_prefix=['a','b'])
# wrong type
with self.assertRaises(RuntimeError):
sa.unpack(column_types = [str, int, float])
# wrong limit types
with self.assertRaises(TypeError):
sa.unpack(limit=["1"])
# int array cannot be unpacked
with self.assertRaises(TypeError):
SArray([1,2,3,4]).unpack()
# column name must be a string
with self.assertRaises(TypeError):
sa.unpack(1)
# invalid column type
with self.assertRaises(TypeError):
sa.unpack(column_types = int)
# invalid column type
with self.assertRaises(TypeError):
sa.unpack(column_types = [np.array])
# cannot infer type if no values
with self.assertRaises(RuntimeError):
SArray([], list).unpack()
def test_unpack_array(self):
import array
sa = SArray([
array.array('d', [1, 1, 0]),
array.array('d', [2, -1, 1]),
array.array('d', [3, 3, 2]),
array.array('d', [-1, 2, 3]),
array.array('d', [5, 5, 4])
])
expected = SFrame()
expected ['a'] = [1.0, 2.0, 3.0, -1.0, 5.0]
expected ['b'] = [1.0, -1.0, 3.0, 2.0, 5.0]
expected ['c'] = [0.0, 1.0, 2.0, 3.0, 4.0]
result = sa.unpack();
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# correct number of generated column names
result = sa.unpack(column_name_prefix = 'unpacked');
result.rename(dict(zip(result.column_names(), ['t.0', 't.1', 't.2'])))
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# column types
result = sa.unpack(column_types=[int, str, float]);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
expected['a'] = expected['a'].astype(int)
expected['b'] = expected['b'].astype(str)
expected['c'] = expected['c'].astype(float)
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
# more column types
result = sa.unpack(column_types=[int, str, float, int]);
result.rename(dict(zip(result.column_names(), ['a','b','c','d'])))
e = expected.select_columns(['a','b','c'])
e.add_column(SArray([None for i in range(5)], int),'d')
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fewer column types
result = sa.unpack(column_types=[int, str]);
result.rename(dict(zip(result.column_names(), ['a','b'])))
e = expected.select_columns(['a','b'])
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
# fill na_value
e = SFrame()
e['a'] = SArray([1, 2, 3, None, 5], float)
e['b'] = SArray([1, None, 3, 2, 5], float)
e['c'] = SArray([0, 1, 2, 3, 4], float)
result = sa.unpack(na_value=-1);
result.rename(dict(zip(result.column_names(), ['a','b','c'])))
assert_frame_equal(result.to_dataframe(), e.to_dataframe())
def test_unpack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4,5,6,7]
sf["is_restaurant"] = [1, 1,0,0, 1, None, None]
sf["is_retail"] = [None,1,1,None,1, None, None]
sf["is_electronics"] = ["yes", "no","yes",None,"no", None, None]
packed_sf = SFrame()
packed_sf['user_id'] = sf['user_id']
packed_sf["category"] = [
{"is_restaurant": 1, "is_electronics": "yes"},
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{"is_restaurant": 0, "is_retail": 1, "is_electronics": "yes"},
{"is_restaurant": 0 },
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{ },
None]
with self.assertRaises(TypeError):
packed_sf['user_id'].unpack()
with self.assertRaises(TypeError):
packed_sf['category'].unpack(1)
with self.assertRaises(TypeError):
packed_sf['category'].unpack(value_types = [int])
# unpack only one column
expected_sf = SFrame()
expected_sf["is_retail"] = sf["is_retail"]
unpacked_sf = packed_sf['category'].unpack(limit=["is_retail"], column_types=[int], column_name_prefix=None)
assert_frame_equal(unpacked_sf.to_dataframe(), expected_sf.to_dataframe())
# unpack all
unpacked_sf = packed_sf['category'].unpack(column_name_prefix=None, column_types=[int, int, str], limit=["is_restaurant", "is_retail", "is_electronics"])
assert_frame_equal(unpacked_sf.to_dataframe(), sf[["is_restaurant", "is_retail", "is_electronics"]].to_dataframe())
# auto-infer types; the column order may differ, so sort before comparing
unpacked_sf = packed_sf["category"].unpack()
unpacked_sf.rename({
"X.is_restaurant": "is_restaurant",
"X.is_retail": "is_retail",
"X.is_electronics": "is_electronics"
})
assert_frame_equal(unpacked_sf.to_dataframe().sort(axis=1), sf[["is_restaurant", "is_retail", "is_electronics"]].to_dataframe().sort(axis=1))
unpacked_sf = packed_sf["category"].unpack(na_value = 0, column_name_prefix="new")
expected = SFrame()
expected["new.is_restaurant"] = [1, 1,None,None, 1, None, None]
expected["new.is_retail"] = [None,1,1,None,1, None, None]
expected["new.is_electronics"] = ["yes", "no","yes",None,"no", None, None]
assert_frame_equal(unpacked_sf.to_dataframe().sort(axis=1), expected.to_dataframe().sort(axis=1))
# unpack a dictionary with integer keys
sa = SArray([
{1: 'a'},
{2: 'b'}
])
result = sa.unpack()
expected = SFrame({'X.1':['a', None], 'X.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(limit=[2])
expected = SFrame({'X.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
result = sa.unpack(limit=[2], column_name_prefix="expanded")
expected = SFrame({'expanded.2':[None, 'b']})
assert_frame_equal(result.to_dataframe(), expected.to_dataframe())
sa = SArray([{i:i} for i in range(500)])
unpacked_sa = sa.unpack()
self.assertEqual(len(unpacked_sa), len(sa))
i = 0
for v in unpacked_sa:
for j in range(500):
val = v['X.' + str(j)]
if (j == i):
self.assertEqual(val, i);
else:
self.assertEqual(val, None);
i = i + 1
# if types don't agree, convert to string automatically
sa = SArray([{'a':1},{'a': 'a_3'}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [str])
sa = SArray([{'a':None}, {'a': 1}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [int])
sa = SArray([{'a':1}, {'a': None}])
sf = sa.unpack()
self.assertEqual(sf.column_types(), [int])
# type inference is done server-side even when a limit is given
sa = SArray([{'c'+str(i): i if i % 2 == 0 else 'v' + str(i)} for i in range(1000)])
unpacked = sa.unpack(limit=['c'+str(i) for i in range(10)], column_name_prefix="")
for i in range(10):
v = unpacked[i]
for j in range(10):
if (j != i):
self.assertEqual(v['c'+str(j)], None)
elif j % 2 == 0:
self.assertEqual(v['c'+str(j)], j)
else:
self.assertEqual(v['c'+str(j)], 'v' + str(j))
def test_unpack_sframe(self):
sf = SFrame()
sf['user_id'] = range(7)
sf["category"] = [
{"is_restaurant": 1, "is_electronics": "yes"},
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{"is_restaurant": 0, "is_retail": 1, "is_electronics": "yes"},
{"is_restaurant": 0 },
{"is_restaurant": 1, "is_retail": 1, "is_electronics": "no"},
{ },
None]
sf['list'] = [
None,
range(1),
range(2),
range(3),
range(1),
range(2),
range(3),
]
with self.assertRaises(TypeError):
sf.unpack('user_id')
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list'] = sf['list']
expected["is_restaurant"] = [1, 1,0,0, 1, None, None]
expected["is_retail"] = [None,1,1,None,1, None, None]
expected["is_electronics"] = ["yes", "no","yes",None,"no", None, None]
result = sf.unpack('category')
result.rename({
'category.is_restaurant': 'is_restaurant',
'category.is_retail': 'is_retail',
'category.is_electronics': 'is_electronics'
})
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="")
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="abc")
result.rename({
'abc.is_restaurant': 'is_restaurant',
'abc.is_retail': 'is_retail',
'abc.is_electronics': 'is_electronics'
})
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="", column_types=[str], limit=['is_restaurant'])
new_expected = expected[['user_id', 'list', 'is_restaurant']]
new_expected['is_restaurant'] = new_expected['is_restaurant'].astype(str)
assert_frame_equal(new_expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='category', column_name_prefix="", na_value = None)
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='list')
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list.0'] = [None,0,0,0, 0,0,0]
expected['list.1'] = [None,None,1,1, None,1,1]
expected['list.2'] = [None,None,None,2, None, None,2]
expected['category'] = sf['category']
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
result = sf.unpack(unpack_column='list', na_value= 2)
expected = SFrame()
expected['user_id'] = sf['user_id']
expected['list.0'] = [None,0,0,0, 0,0,0]
expected['list.1'] = [None,None,1,1, None,1,1]
expected['list.2'] = [None,None,None,None, None, None,None]
expected['category'] = sf['category']
assert_frame_equal(expected.to_dataframe().sort(axis=1), result.to_dataframe().sort(axis=1))
# automatically resolve conflicting column names
sf = SFrame()
sf['a'] = range(100)
sf['b'] = [range(5) for i in range(100)]
sf['b.0'] = range(100)
sf['b.0.1'] = range(100)
result = sf.unpack('b')
self.assertEqual(result.column_names(), ['a', 'b.0', 'b.0.1', 'b.0.1.1', 'b.1.1.1', 'b.2.1.1', 'b.3.1.1', 'b.4.1.1'])
sf = SFrame()
sf['a'] = range(100)
sf['b'] = [{'str1': i, 'str2':i + 1} for i in range(100)]
sf['b.str1'] = range(100)
result = sf.unpack('b')
self.assertEqual(len(result.column_names()), 4)
def test_stack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4,5]
sf["user_name"] = ['user' + str(i) for i in list(sf['user_id'])]
sf["category"] = [
{"is_restaurant": 1, },
{"is_restaurant": 0, "is_retail": 1 },
{ "is_retail": 0 },
{},
None]
expected_sf = SFrame();
expected_sf["user_id"] = [1,2, 2, 3,4,5]
expected_sf["user_name"] = ['user' + str(i) for i in list(expected_sf['user_id'])]
expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]
expected_sf['value'] = [1,0,1,0, None, None]
df_expected = expected_sf.to_dataframe().sort(['user_id', 'category']).reset_index(drop=True)
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(ValueError):
sf.stack('category', ['user_id', 'value'])
# normal case
stacked_sf = sf.stack('category', ['category', 'value'])
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", "category"]).reset_index(drop=True), df_expected)
# set column types
stacked_sf = sf.stack('category')
self.assertTrue(stacked_sf.column_types()[2] == str)
self.assertTrue(stacked_sf.column_types()[3] == int)
# auto generate column names
stacked_sf = sf.stack('category')
new_column_names = stacked_sf.column_names()
self.assertTrue(len(new_column_names) == 4)
expected_sf.rename({'category':new_column_names[2], 'value':new_column_names[3]})
df_expected = expected_sf.to_dataframe().sort(['user_id', new_column_names[2]]).reset_index(drop=True)
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", new_column_names[2]]).reset_index(drop=True), df_expected)
#dropna
expected_sf = SFrame();
expected_sf["user_id"] = [1,2, 2, 3, 4, 5]
expected_sf["user_name"] = ['user' + str(i) for i in list(expected_sf['user_id'])]
expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]
expected_sf['value'] = [1,0,1,0, None, None]
df_expected = expected_sf.to_dataframe().sort(['user_id', 'category']).reset_index(drop=True)
stacked_sf = sf.stack('category', ['category','value'], drop_na = False)
assert_frame_equal(stacked_sf.to_dataframe().sort(["user_id", "category"]).reset_index(drop=True), df_expected)
sf = SFrame()
sf['a'] = SArray(([{}] * 100) + [{'a':1}])
# it's a dict column, so stack needs two new column types (key and value)
with self.assertRaises(ValueError):
sf.stack('a',['key', 'value'], new_column_type=[str])
with self.assertRaises(ValueError):
sf.stack('a',['key', 'value'], new_column_type=str)
sf.stack('a',['key', 'value'], new_column_type=[str, int])
expected_sf = SFrame()
expected_sf['key'] = SArray([None] * 100 + ["a"])
expected_sf['value'] = SArray([None] * 100 + [1])
def test_stack_list(self):
sf = SFrame()
sf["a"] = [1,2,3,4,5]
sf["b"] = [['a', 'b'], ['c'], ['d'],['e', None], None]
expected_result = SFrame()
expected_result['a'] = [1,1,2,3,4,4,5]
expected_result['X1'] = ['a','b','c','d','e',None, None]
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(TypeError):
sf.stack('a')
with self.assertRaises(TypeError):
sf.stack('b', ["something"])
result = sf.stack("b", drop_na = False)
stacked_column_name = result.column_names()[1]
expected_result.rename({'X1':stacked_column_name})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# default drop_na=False
result = sf.stack("b")
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
expected_result.rename({stacked_column_name: 'b'})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# drop_na=True
result = sf.stack("b", drop_na = True)
expected_result = SFrame()
expected_result['a'] = [1,1,2,3,4,4]
expected_result[result.column_names()[1]] = ['a','b','c','d','e',None]
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
sf = SFrame()
n = 1000000
sf['a'] = range(1,n)
sf['b'] = [[str(i), str(i+1)] for i in range(1,n)]
result = sf.stack('b')
self.assertTrue(len(result), n * 2)
sf = SFrame()
sf['a'] = SArray(([[]] * 100) + [['a','b']])
# it's a list column, so stack accepts only a single new column type
with self.assertRaises(ValueError):
sf.stack('a', 'a', new_column_type=[str, int])
sf.stack('a', 'a', new_column_type=str)
expected_sf = SFrame()
expected_sf['a'] = SArray([None] * 100 + ["a", "b"])
def test_stack_vector(self):
sf = SFrame()
sf["a"] = [1,2,3,4,5]
sf["b"] = [[1],[1,2],[1,2,3],[1,2,3,4],None]
expected_result = SFrame()
expected_result['a'] = [1,2,2,3,3,3,4,4,4,4,5]
expected_result['X1'] = [1,1,2,1,2,3,1,2,3,4,None]
with self.assertRaises(TypeError):
sf.stack()
with self.assertRaises(ValueError):
sf.stack('sss')
with self.assertRaises(TypeError):
sf.stack('a')
with self.assertRaises(TypeError):
sf.stack('b', ["something"])
result = sf.stack("b", drop_na = False)
stacked_column_name = result.column_names()[1]
expected_result.rename({'X1':stacked_column_name})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# default drop_na=False
result = sf.stack("b")
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
expected_result.rename({stacked_column_name: 'b'})
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
result = sf.stack("b", new_column_name = "b", drop_na = False)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
# drop_na=True
result = sf.stack("b", drop_na = True)
expected_result = SFrame()
expected_result['a'] = [1,2,2,3,3,3,4,4,4,4]
expected_result[result.column_names()[1]] = SArray([1,1,2,1,2,3,1,2,3,4], float)
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())
import array
sf = SFrame()
sf['a'] = SArray(([array.array('d')] * 100) + [array.array('d',[1.0,2.0])])
# it's a vector (array) column, so stack accepts only a single new column type
with self.assertRaises(ValueError):
sf.stack('a', 'a', new_column_type=[str, int])
sf.stack('a', 'a', new_column_type=int)
expected_sf = SFrame()
expected_sf['a'] = SArray([None] * 100 + [1, 2])
def test_unstack_dict(self):
sf = SFrame()
sf["user_id"] = [1,2,3,4]
sf["user_name"] = ['user' + str(i) for i in list(sf['user_id'])]
sf["categories"] = [
{"is_restaurant": 1, },
{"is_restaurant": 0, "is_retail": 1 },
{ "is_retail": 0 },
None]
stacked_sf = sf.stack('categories', ['category', 'value'], drop_na=False)
# normal unstack
unstacked_sf = stacked_sf.unstack(column=['category', 'value'], new_column_name = 'categories')
# these frames are *almost* equal except user4 will be {} instead of None
assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort("user_id").reset_index(drop=True))
# missing new column name
unstacked_sf = stacked_sf.unstack(['category', 'value'])
self.assertEqual(len(unstacked_sf.column_names()), 3)
unstacked_sf.rename({unstacked_sf.column_names()[2] : 'categories'})
assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort("user_id").reset_index(drop=True))
# missing column names
with self.assertRaises(KeyError):
stacked_sf.unstack(['category','value1'])
# wrong input
with self.assertRaises(TypeError):
stacked_sf.unstack(['category'])
# duplicate new column name
with self.assertRaises(RuntimeError):
unstacked_sf = stacked_sf.unstack(['category', 'value'], 'user_name')
def test_unstack_list(self):
sf = SFrame()
sf['a'] = [1,2,3,4]
sf['b'] = [range(10), range(20), range(30), range(50)]
stacked_sf = sf.stack('b', new_column_name = 'new_b')
unstacked_sf = stacked_sf.unstack('new_b', new_column_name = 'b')
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
unstacked_sf = stacked_sf.unstack('new_b')
unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'})
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
unstacked_sf = stacked_sf.unstack('new_b', new_column_name='b')
unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'})
self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])
with self.assertRaises(RuntimeError):
stacked_sf.unstack('new_b', new_column_name='a')
with self.assertRaises(TypeError):
stacked_sf.unstack(['new_b'])
with self.assertRaises(KeyError):
stacked_sf.unstack('non exist')
def test_content_identifier(self):
sf = SFrame({"a":[1,2,3,4],"b":["1","2","3","4"]})
a1 = sf['a'].__get_content_identifier__()
a2 = sf['a'].__get_content_identifier__()
self.assertEquals(a1, a2)
def test_random_access(self):
t1 = list(range(0,100000))
t2 = [str(i) for i in t1]
t = [{'t1':t1[i], 't2':t2[i]} for i in range(len(t1))];
s = SFrame({'t1':t1,'t2':t2})
# simple slices
self.__test_equal(s[1:10000], pd.DataFrame(t[1:10000]))
self.__test_equal(s[0:10000:3], pd.DataFrame(t[0:10000:3]))
self.__test_equal(s[1:10000:3], pd.DataFrame(t[1:10000:3]))
self.__test_equal(s[2:10000:3], pd.DataFrame(t[2:10000:3]))
self.__test_equal(s[3:10000:101], pd.DataFrame(t[3:10000:101]))
# negative slices
self.__test_equal(s[-5:], pd.DataFrame(t[-5:]))
self.__test_equal(s[-1:], pd.DataFrame(t[-1:]))
self.__test_equal(s[-100:-10], pd.DataFrame(t[-100:-10]))
self.__test_equal(s[-100:-10:2], pd.DataFrame(t[-100:-10:2]))
# single element reads
self.assertEqual(s[511], t[511])
self.assertEqual(s[1912],t[1912])
self.assertEqual(s[-1], t[-1])
self.assertEqual(s[-10],t[-10])
# edge case oddities
self.__test_equal(s[10:100:100], pd.DataFrame(t[10:100:100]))
self.__test_equal(s[-100:len(s):10], pd.DataFrame(t[-100:len(t):10]))
self.assertEqual(len(s[-1:-2]), 0)
self.assertEqual(len(s[-1:-1000:2]), 0)
with self.assertRaises(IndexError):
s[len(s)]
def sort_n_rows(self, nrows=100):
nrows += 1
sf = SFrame()
sf['a'] = range(1, nrows)
sf['b'] = [float(i) for i in range(1,nrows)]
sf['c'] = [str(i) for i in range(1,nrows)]
sf['d'] = [[i, i+1] for i in range(1,nrows)]
reversed_sf = SFrame()
reversed_sf['a'] = range(nrows-1, 0, -1)
reversed_sf['b'] = [float(i) for i in range(nrows-1, 0, -1)]
reversed_sf['c'] = [str(i) for i in range(nrows-1, 0, -1)]
reversed_sf['d'] = [[i, i+1] for i in range(nrows-1, 0, -1)]
with self.assertRaises(TypeError):
sf.sort()
with self.assertRaises(TypeError):
sf.sort(1)
with self.assertRaises(TypeError):
sf.sort("d")
with self.assertRaises(ValueError):
sf.sort("nonexist")
with self.assertRaises(TypeError):
sf.sort({'a':True})
result = sf.sort('a')
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
# try a lazy input
result = sf[sf['a'] > 10].sort('a')
assert_frame_equal(sf[sf['a'] > 10].to_dataframe(), result.to_dataframe());
result = sf.sort('a', ascending = False)
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
# lazy reversed
result = sf[sf['a'] > 10].sort('a', ascending = False)
assert_frame_equal(reversed_sf[reversed_sf['a'] > 10].to_dataframe(), result.to_dataframe());
# lazy reversed
result = sf[sf['a'] > 10].sort('a', ascending = False)
assert_frame_equal(reversed_sf[reversed_sf['a'] > 10].to_dataframe(), result.to_dataframe());
# sort two columns
result = sf.sort(['a', 'b'])
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
result = sf.sort(['a', 'c'], ascending = False)
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
result = sf.sort([('a', True), ('b', False)])
assert_frame_equal(sf.to_dataframe(), result.to_dataframe());
result = sf.sort([('a', False), ('b', True)])
assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe());
# empty sort should not throw
sf = SFrame({'x':[]})
sf.sort('x')
def test_sort(self):
#self.sort_n_rows(100)
for i in range(1, 10):
self.sort_n_rows(i)
def test_dropna(self):
# empty case
sf = SFrame()
self.assertEquals(len(sf.dropna()), 0)
# normal case
self.__test_equal(self.employees_sf.dropna(), self.employees_sf[0:5].to_dataframe())
test_split = self.employees_sf.dropna_split()
self.__test_equal(test_split[0], self.employees_sf[0:5].to_dataframe())
self.__test_equal(test_split[1], self.employees_sf[5:6].to_dataframe())
# create some other test sframe
test_sf = SFrame({'ints':SArray([None,None,3,4,None], int),
'floats':SArray([np.nan,2.,3.,4.,np.nan],float),
'strs':SArray(['1',np.nan,'','4',None], str),
'lists':SArray([[1],None,[],[1,1,1,1],None], list),
'dicts':SArray([{1:2},{2:3},{},{4:5},None], dict)})
# another normal, but more interesting case
self.__test_equal(test_sf.dropna(),
pd.DataFrame({'ints':[3,4],'floats':[3.,4.],'strs':['','4'],'lists':[[],[1,1,1,1]],'dicts':[{},{4:5}]}))
test_split = test_sf.dropna_split()
self.__test_equal(test_split[0], test_sf[2:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[0:2].append(test_sf[4:5]).to_dataframe())
# the 'all' case
self.__test_equal(test_sf.dropna(how='all'), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split(how='all')
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
# select some columns
self.__test_equal(test_sf.dropna(['ints','floats'], how='all'), test_sf[1:4].to_dataframe())
test_split = test_sf.dropna_split(['ints','floats'], how='all')
self.__test_equal(test_split[0], test_sf[1:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[0:1].append(test_sf[4:5]).to_dataframe())
self.__test_equal(test_sf.dropna('strs'), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split('strs')
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
self.__test_equal(test_sf.dropna(['strs','dicts']), test_sf[0:4].to_dataframe())
test_split = test_sf.dropna_split(['strs','dicts'])
self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())
self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())
# invalid arguments
with self.assertRaises(TypeError):
test_sf.dropna(1)
test_sf.dropna([1,2])
test_sf.dropna('strs', how=1)
test_sf.dropna_split(1)
test_sf.dropna_split([1,2])
test_sf.dropna_split('strs', how=1)
with self.assertRaises(ValueError):
test_sf.dropna('ints', how='blah')
test_sf.dropna_split('ints', how='blah')
with self.assertRaises(RuntimeError):
test_sf.dropna('dontexist')
test_sf.dropna_split('dontexist')
def test_add_row_number(self):
sf = SFrame(self.__create_test_df(400000))
sf = sf.add_row_number('id')
self.assertEquals(list(sf['id']), list(range(0,400000)))
del sf['id']
sf = sf.add_row_number('id', -20000)
self.assertEquals(list(sf['id']), list(range(-20000,380000)))
del sf['id']
sf = sf.add_row_number('id', 40000)
self.assertEquals(list(sf['id']), list(range(40000,440000)))
with self.assertRaises(RuntimeError):
sf.add_row_number('id')
with self.assertRaises(TypeError):
sf = sf.add_row_number(46)
sf = sf.add_row_number('id2',start='hi')
def test_check_lazy_sframe_size(self):
# empty sframe, materialized, has_size
sf = SFrame()
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# add one column; still materialized, has_size
sf['a'] = range(1000)
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# materialize it, materialized, has_size
sf['a'] = range(1000)
sf.__materialize__()
self.assertTrue(sf.__is_materialized__())
self.assertTrue(sf.__has_size__())
# logical filter, not materialized, not has_size
sf = sf[sf['a'] > 5000]
self.assertFalse(sf.__is_materialized__())
self.assertFalse(sf.__has_size__())
def test_lazy_logical_filter_sarray(self):
g=SArray(range(10000))
g2=SArray(range(10000))
a=g[g>10]
a2=g2[g>10]
z=a[a2>20]
self.assertEqual(len(z), 9979)
def test_lazy_logical_filter_sframe(self):
g=SFrame({'a':range(10000)})
g2=SFrame({'a':range(10000)})
a=g[g['a']>10]
a2=g2[g['a']>10]
z=a[a2['a']>20]
self.assertEqual(len(z), 9979)
def test_sframe_to_rdd(self):
if not HAS_PYSPARK:
print("Did not run Pyspark unit tests!")
return
sc = SparkContext('local')
# Easiest case: single column of integers
test_rdd = sc.parallelize(range(100))
sf = SFrame.from_rdd(test_rdd)
self.assertTrue(sf.num_cols(), 1)
self.assertTrue(sf.column_names(), ['X1'])
# We cast integers to floats to be safe on varying types
self.assertEquals([float(i) for i in range(0,100)], list(sf['X1']))
sc.stop()
def test_rdd_to_sframe(self):
if not HAS_PYSPARK:
print("Did not run Pyspark unit tests!")
return
sc = SparkContext('local')
# Easiest case: single column of integers
sf = SFrame({'column_name':range(100)})
test_rdd = sf.to_rdd(sc)
res = test_rdd.collect()
self.assertEquals(res, [{'column_name':long(i)} for i in range(100)])
sc.stop()
def test_column_manipulation_of_lazy_sframe(self):
g=SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})
g = g[g['id'] > 2]
del g['id']
# if lazy column deletion is quirky, this will cause an exception
self.assertEquals(list(g[0:2]['a']), [3,4])
g=SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})
g = g[g['id'] > 2]
g.swap_columns('a','id')
# if lazy column swap is quirky, this will cause an exception
self.assertEquals(list(g[0:2]['a']), [3,4])
def test_empty_sarray(self):
with util.TempDirectory() as f:
sf = SArray()
sf.save(f)
sf2 = SArray(f)
self.assertEquals(len(sf2), 0)
def test_empty_sframe(self):
with util.TempDirectory() as f:
sf = SFrame()
sf.save(f)
sf2 = SFrame(f)
self.assertEquals(len(sf2), 0)
self.assertEquals(sf2.num_columns(), 0)
def test_none_column(self):
sf = SFrame({'a':[1,2,3,4,5]})
sf['b'] = None
self.assertEqual(sf['b'].dtype(), float)
df = pd.DataFrame({'a': [1,2,3,4,5], 'b': [None,None,None,None,None]})
self.__test_equal(sf, df)
sa = SArray.from_const(None, 100)
self.assertEquals(list(sa), [None] * 100)
self.assertEqual(sa.dtype(), float)
def test_apply_with_partial(self):
sf = SFrame({'a': [1, 2, 3, 4, 5]})
def concat_fn(character, row):
return '%s%d' % (character, row['a'])
my_partial_fn = functools.partial(concat_fn, 'x')
sa = sf.apply(my_partial_fn)
self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_apply_with_functor(self):
sf = SFrame({'a': [1, 2, 3, 4, 5]})
class Concatenator(object):
def __init__(self, character):
self.character = character
def __call__(self, row):
return '%s%d' % (self.character, row['a'])
concatenator = Concatenator('x')
sa = sf.apply(concatenator)
self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_save_sframe(self):
'''Saving a lazily evaluated SFrame should not materialize it to the target folder
'''
data = SFrame()
data['x'] = range(100)
data['x'] = data['x'] > 50
# still lazily evaluated at this point
tmp_dir = tempfile.mkdtemp()
data.save(tmp_dir)
shutil.rmtree(tmp_dir)
print(data)
def test_empty_argmax_does_not_fail(self):
# an empty argmax should not result in a crash
sf = SFrame({'id': [0, 0, 0, 1, 1, 2, 2],
'value': [3.0, 2.0, 2.3, None, None, 4.3, 1.3],
'category': ['A', 'B', 'A', 'E', 'A', 'A', 'B']})
sf.groupby('id', aggregate.ARGMAX('value', 'category'))
def test_cache_invalidation(self):
# Changes to the SFrame should invalidate the indexing cache.
X = SFrame({'a' : range(4000),
'b' : range(4000)})
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : i, 'b' : i})
X['a'] = range(1000, 5000)
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : 1000 + i, 'b' : i})
del X['b']
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : 1000 + i})
X['b'] = X['a']
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : 1000 + i, 'b' : 1000 + i})
X.rename({'b' : 'c'})
for i in range(0, 4000, 20):
self.assertEqual(X[i], {'a' : 1000 + i, 'c' : 1000 + i})
def test_to_numpy(self):
X = SFrame({'a' : range(100),
'b' : range(100)})
import numpy as np
import numpy.testing as nptest
Y = np.transpose(np.array([range(100), range(100)]))
nptest.assert_array_equal(X.to_numpy(), Y)
X['b'] = X['b'].astype(str)
s = [str(i) for i in range(100)]
Y = np.transpose(np.array([s, s]))
nptest.assert_array_equal(X.to_numpy(), Y)
@mock.patch(__name__+'.sqlite3.Cursor', spec=True)
@mock.patch(__name__+'.sqlite3.Connection', spec=True)
def test_from_sql(self, mock_conn, mock_cursor):
# Set up mock connection and cursor
conn = mock_conn('example.db')
curs = mock_cursor()
conn.cursor.return_value = curs
sf_type_codes = [44,44,41,22,114,199,43]
sf_data = list(zip(*self.all_type_cols))
sf_iter = sf_data.__iter__()
def mock_fetchone():
try:
return next(sf_iter)
except StopIteration:
return None
def mock_fetchmany(size=1):
count = 0
ret_list = []
for i in sf_iter:
if count == curs.arraysize:
break
ret_list.append(i)
count += 1
return ret_list
curs.fetchone.side_effect = mock_fetchone
curs.fetchmany.side_effect = mock_fetchmany
curs.description = [['X'+str(i+1),sf_type_codes[i]]+[None for j in range(5)] for i in range(len(sf_data[0]))]
# bigger than cache, no Nones
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=5, dbapi_module=dbapi2_mock())
_assert_sframe_equal(sf, self.sf_all_types)
# smaller than cache, no Nones
sf_iter = sf_data.__iter__()
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=100, dbapi_module=dbapi2_mock())
_assert_sframe_equal(sf, self.sf_all_types)
none_col = [None for i in range(5)]
nones_in_cache = list(zip(*[none_col for i in range(len(sf_data[0]))]))
none_sf = SFrame({'X'+str(i):none_col for i in range(1,len(sf_data[0])+1)})
test_data = (nones_in_cache+sf_data)
sf_iter = test_data.__iter__()
# more None rows than cache & types in description
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=5, dbapi_module=dbapi2_mock())
sf_inferred_types = SFrame()
expected_types = [float,float,str,str,str,str,dt.datetime]
for i in zip(self.sf_all_types.column_names(),expected_types):
new_col = SArray(none_col).astype(i[1])
new_col = new_col.append(self.sf_all_types[i[0]].apply(lambda x: i[1](x) if i[1] is not dt.datetime else x))
sf_inferred_types.add_column(new_col)
# Don't test the string representation of dict and list; there are
# funky consistency issues with the string representations of these
sf.remove_columns(['X5', 'X6'])
sf_inferred_types.remove_columns(['X5', 'X6'])
_assert_sframe_equal(sf, sf_inferred_types)
# more None rows than cache & no type information
for i in range(len(curs.description)):
curs.description[i][1] = None
sf_iter = test_data.__iter__()
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=5, dbapi_module=dbapi2_mock())
sf_inferred_types = SFrame()
expected_types = [str for i in range(len(sf_data[0]))]
for i in zip(self.sf_all_types.column_names(),expected_types):
new_col = SArray(none_col).astype(i[1])
new_col = new_col.append(self.sf_all_types[i[0]].apply(lambda x: str(x)))
sf_inferred_types.add_column(new_col)
# Don't test the string representation of dict, could be out of order
sf.remove_columns(['X5', 'X6'])
sf_inferred_types.remove_columns(['X5', 'X6'])
_assert_sframe_equal(sf, sf_inferred_types)
### column_type_hints tests
sf_iter = test_data.__iter__()
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", type_inference_rows=5,
dbapi_module=dbapi2_mock(), column_type_hints=str)
sf.remove_columns(['X5', 'X6'])
_assert_sframe_equal(sf, sf_inferred_types)
# Provide unhintable types
sf_iter = test_data.__iter__()
expected_types = [int,float,str,array.array,list,dict,dt.datetime]
with self.assertRaises(TypeError):
sf = SFrame.from_sql(conn,
"SELECT * FROM test_table", type_inference_rows=5,
dbapi_module=dbapi2_mock(), column_type_hints=expected_types)
sf_iter = test_data.__iter__()
expected_types = {'X'+str(i+1):expected_types[i] for i in range(3)}
sf = SFrame.from_sql(conn,
"SELECT * FROM test_table", type_inference_rows=10,
dbapi_module=dbapi2_mock(), column_type_hints=expected_types)
_assert_sframe_equal(sf[5:],self.sf_all_types)
# Test a float forced to a str
sf_iter = test_data.__iter__()
expected_types['X2'] = str
self.sf_all_types['X2'] = self.sf_all_types['X2'].apply(lambda x: str(x))
sf = SFrame.from_sql(conn,
"SELECT * FROM test_table", type_inference_rows=10,
dbapi_module=dbapi2_mock(), column_type_hints=expected_types)
_assert_sframe_equal(sf[5:],self.sf_all_types)
# Type unsupported by sframe
curs.description = [['X1',44],['X2',44]]
sf_iter = [[complex(4.5,3),1], [complex(3.4,5),2]].__iter__()
sf = SFrame.from_sql(conn, "SELECT * FROM test_table")
expected_sf = SFrame({'X1':["(4.5+3j)","(3.4+5j)"],'X2':[1,2]})
_assert_sframe_equal(sf, expected_sf)
# bad DBAPI version!
bad_version = dbapi2_mock()
bad_version.apilevel = "1.0 "
with self.assertRaises(NotImplementedError):
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", dbapi_module=bad_version)
# Bad module
with self.assertRaises(AttributeError):
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", dbapi_module=os)
# Bad connection
with self.assertRaises(AttributeError):
sf = SFrame.from_sql(4, "SELECT * FROM test_table")
# Empty query result
curs.description = []
sf = SFrame.from_sql(conn, "SELECT * FROM test_table", dbapi_module=dbapi2_mock())
_assert_sframe_equal(sf, SFrame())
@mock.patch(__name__+'.sqlite3.Cursor', spec=True)
@mock.patch(__name__+'.sqlite3.Connection', spec=True)
def test_to_sql(self, mock_conn, mock_cursor):
conn = mock_conn('example.db')
curs = mock_cursor()
insert_stmt = "INSERT INTO ins_test (X1,X2,X3,X4,X5,X6,X7) VALUES ({0},{1},{2},{3},{4},{5},{6})"
num_cols = len(self.sf_all_types.column_names())
test_cases = [
('qmark',insert_stmt.format(*['?' for i in range(num_cols)])),
('numeric',insert_stmt.format(*[':'+str(i) for i in range(1,num_cols+1)])),
('named',insert_stmt.format(*[':X'+str(i) for i in range(1,num_cols+1)])),
('format',insert_stmt.format(*['%s' for i in range(num_cols)])),
('pyformat',insert_stmt.format(*['%(X'+str(i)+')s' for i in range(1,num_cols+1)])),
]
for i in test_cases:
conn.cursor.return_value = curs
mock_mod = dbapi2_mock()
mock_mod.paramstyle = i[0]
self.sf_all_types.to_sql(conn, "ins_test", dbapi_module=mock_mod)
conn.cursor.assert_called_once_with()
calls = []
col_names = self.sf_all_types.column_names()
for j in self.sf_all_types:
if i[0] == 'named' or i[0] == 'pyformat':
calls.append(mock.call(i[1],j))
else:
calls.append(mock.call(i[1],[j[k] for k in col_names]))
curs.execute.assert_has_calls(calls, any_order=False)
self.assertEquals(curs.execute.call_count, len(self.sf_all_types))
conn.commit.assert_called_once_with()
curs.close.assert_called_once_with()
conn.reset_mock()
curs.reset_mock()
# bad DBAPI version!
bad_version = dbapi2_mock()
bad_version.apilevel = "1.0 "
with self.assertRaises(NotImplementedError):
self.sf_all_types.to_sql(conn, "ins_test", dbapi_module=bad_version)
# bad paramstyle
bad_paramstyle = dbapi2_mock()
bad_paramstyle.paramstyle = 'foo'
with self.assertRaises(TypeError):
self.sf_all_types.to_sql(conn, "ins_test", dbapi_module=bad_paramstyle)
def test_materialize(self):
sf = SFrame({'a':range(100)})
sf = sf[sf['a'] > 10]
self.assertFalse(sf.is_materialized())
sf.materialize()
self.assertTrue(sf.is_materialized())
def test_materialization_slicing(self):
# Has been known to fail.
g=SFrame({'a':range(100)})[:10]
g['b'] = g['a'] + 1
g['b'].materialize()
g.materialize()
def test_copy(self):
from copy import copy
sf = generate_random_sframe(100, "Cns")
sf_copy = copy(sf)
assert sf is not sf_copy
_assert_sframe_equal(sf, sf_copy)
def test_deepcopy(self):
from copy import deepcopy
sf = generate_random_sframe(100, "Cns")
sf_copy = deepcopy(sf)
assert sf is not sf_copy
_assert_sframe_equal(sf, sf_copy)
if __name__ == "__main__":
import sys
# Check if we are supposed to connect to another server
for i, v in enumerate(sys.argv):
if v.startswith("ipc://"):
_launch(v)
# The rest of the arguments need to get passed through to
# the unittest module
del sys.argv[i]
break
unittest.main()
| bsd-3-clause |
hitszxp/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 41 | 7742 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assess the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in scikit-learn.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
import urllib
print("Downloading data from '%s', please wait..." % url)
opener = urllib.urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
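# Worked example of the slice above (the page name is hypothetical):
#   short_name("<http://dbpedia.org/resource/Some_Page>") -> "Some_Page"
# i.e. the leading "<", the shared resource prefix and the trailing ">"
# are all dropped.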
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in index_map.iteritems())
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the strongest Wikipedia-related components of the
# principal singular vector, which should be similar to the principal eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
JWarmenhoven/seaborn | seaborn/tests/test_palettes.py | 2 | 9884 | import warnings
import colorsys
import numpy as np
import matplotlib as mpl
import nose.tools as nt
import numpy.testing as npt
from .. import palettes, utils, rcmod
from ..external import husl
from ..xkcd_rgb import xkcd_rgb
from ..crayons import crayons
class TestColorPalettes(object):
def test_current_palette(self):
pal = palettes.color_palette(["red", "blue", "green"], 3)
rcmod.set_palette(pal, 3)
nt.assert_equal(pal, utils.get_color_cycle())
rcmod.set()
def test_palette_context(self):
default_pal = palettes.color_palette()
context_pal = palettes.color_palette("muted")
with palettes.color_palette(context_pal):
nt.assert_equal(utils.get_color_cycle(), context_pal)
nt.assert_equal(utils.get_color_cycle(), default_pal)
def test_big_palette_context(self):
original_pal = palettes.color_palette("deep", n_colors=8)
context_pal = palettes.color_palette("husl", 10)
rcmod.set_palette(original_pal)
with palettes.color_palette(context_pal, 10):
nt.assert_equal(utils.get_color_cycle(), context_pal)
nt.assert_equal(utils.get_color_cycle(), original_pal)
# Reset default
rcmod.set()
def test_seaborn_palettes(self):
pals = "deep", "muted", "pastel", "bright", "dark", "colorblind"
for name in pals:
pal_out = palettes.color_palette(name)
nt.assert_equal(len(pal_out), 6)
def test_hls_palette(self):
hls_pal1 = palettes.hls_palette()
hls_pal2 = palettes.color_palette("hls")
npt.assert_array_equal(hls_pal1, hls_pal2)
def test_husl_palette(self):
husl_pal1 = palettes.husl_palette()
husl_pal2 = palettes.color_palette("husl")
npt.assert_array_equal(husl_pal1, husl_pal2)
def test_mpl_palette(self):
mpl_pal1 = palettes.mpl_palette("Reds")
mpl_pal2 = palettes.color_palette("Reds")
npt.assert_array_equal(mpl_pal1, mpl_pal2)
def test_mpl_dark_palette(self):
mpl_pal1 = palettes.mpl_palette("Blues_d")
mpl_pal2 = palettes.color_palette("Blues_d")
npt.assert_array_equal(mpl_pal1, mpl_pal2)
def test_bad_palette_name(self):
with nt.assert_raises(ValueError):
palettes.color_palette("IAmNotAPalette")
def test_terrible_palette_name(self):
with nt.assert_raises(ValueError):
palettes.color_palette("jet")
def test_bad_palette_colors(self):
pal = ["red", "blue", "iamnotacolor"]
with nt.assert_raises(ValueError):
palettes.color_palette(pal)
def test_palette_desat(self):
pal1 = palettes.husl_palette(6)
pal1 = [utils.desaturate(c, .5) for c in pal1]
pal2 = palettes.color_palette("husl", desat=.5)
npt.assert_array_equal(pal1, pal2)
def test_palette_is_list_of_tuples(self):
pal_in = np.array(["red", "blue", "green"])
pal_out = palettes.color_palette(pal_in, 3)
nt.assert_is_instance(pal_out, list)
nt.assert_is_instance(pal_out[0], tuple)
nt.assert_is_instance(pal_out[0][0], float)
nt.assert_equal(len(pal_out[0]), 3)
def test_palette_cycles(self):
deep = palettes.color_palette("deep")
double_deep = palettes.color_palette("deep", 12)
nt.assert_equal(double_deep, deep + deep)
def test_hls_values(self):
pal1 = palettes.hls_palette(6, h=0)
pal2 = palettes.hls_palette(6, h=.5)
pal2 = pal2[3:] + pal2[:3]
npt.assert_array_almost_equal(pal1, pal2)
pal_dark = palettes.hls_palette(5, l=.2)
pal_bright = palettes.hls_palette(5, l=.8)
npt.assert_array_less(list(map(sum, pal_dark)),
list(map(sum, pal_bright)))
pal_flat = palettes.hls_palette(5, s=.1)
pal_bold = palettes.hls_palette(5, s=.9)
npt.assert_array_less(list(map(np.std, pal_flat)),
list(map(np.std, pal_bold)))
def test_husl_values(self):
pal1 = palettes.husl_palette(6, h=0)
pal2 = palettes.husl_palette(6, h=.5)
pal2 = pal2[3:] + pal2[:3]
npt.assert_array_almost_equal(pal1, pal2)
pal_dark = palettes.husl_palette(5, l=.2)
pal_bright = palettes.husl_palette(5, l=.8)
npt.assert_array_less(list(map(sum, pal_dark)),
list(map(sum, pal_bright)))
pal_flat = palettes.husl_palette(5, s=.1)
pal_bold = palettes.husl_palette(5, s=.9)
npt.assert_array_less(list(map(np.std, pal_flat)),
list(map(np.std, pal_bold)))
def test_cbrewer_qual(self):
pal_short = palettes.mpl_palette("Set1", 4)
pal_long = palettes.mpl_palette("Set1", 6)
nt.assert_equal(pal_short, pal_long[:4])
pal_full = palettes.mpl_palette("Set2", 8)
pal_long = palettes.mpl_palette("Set2", 10)
nt.assert_equal(pal_full, pal_long[:8])
def test_mpl_reversal(self):
pal_forward = palettes.mpl_palette("BuPu", 6)
pal_reverse = palettes.mpl_palette("BuPu_r", 6)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
def test_rgb_from_hls(self):
color = .5, .8, .4
rgb_got = palettes._color_to_rgb(color, "hls")
rgb_want = colorsys.hls_to_rgb(*color)
nt.assert_equal(rgb_got, rgb_want)
def test_rgb_from_husl(self):
color = 120, 50, 40
rgb_got = palettes._color_to_rgb(color, "husl")
rgb_want = husl.husl_to_rgb(*color)
nt.assert_equal(rgb_got, rgb_want)
def test_rgb_from_xkcd(self):
color = "dull red"
rgb_got = palettes._color_to_rgb(color, "xkcd")
rgb_want = xkcd_rgb[color]
nt.assert_equal(rgb_got, rgb_want)
def test_light_palette(self):
pal_forward = palettes.light_palette("red")
pal_reverse = palettes.light_palette("red", reverse=True)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
red = tuple(mpl.colors.colorConverter.to_rgba("red"))
nt.assert_equal(tuple(pal_forward[-1]), red)
pal_cmap = palettes.light_palette("blue", as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_dark_palette(self):
pal_forward = palettes.dark_palette("red")
pal_reverse = palettes.dark_palette("red", reverse=True)
npt.assert_array_almost_equal(pal_forward, pal_reverse[::-1])
red = tuple(mpl.colors.colorConverter.to_rgba("red"))
nt.assert_equal(tuple(pal_forward[-1]), red)
pal_cmap = palettes.dark_palette("blue", as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_blend_palette(self):
colors = ["red", "yellow", "white"]
pal_cmap = palettes.blend_palette(colors, as_cmap=True)
nt.assert_is_instance(pal_cmap, mpl.colors.LinearSegmentedColormap)
def test_cubehelix_against_matplotlib(self):
x = np.linspace(0, 1, 8)
mpl_pal = mpl.cm.cubehelix(x)[:, :3].tolist()
sns_pal = palettes.cubehelix_palette(8, start=0.5, rot=-1.5, hue=1,
dark=0, light=1, reverse=True)
nt.assert_list_equal(sns_pal, mpl_pal)
def test_cubehelix_n_colors(self):
for n in [3, 5, 8]:
pal = palettes.cubehelix_palette(n)
nt.assert_equal(len(pal), n)
def test_cubehelix_reverse(self):
pal_forward = palettes.cubehelix_palette()
pal_reverse = palettes.cubehelix_palette(reverse=True)
nt.assert_list_equal(pal_forward, pal_reverse[::-1])
def test_cubehelix_cmap(self):
cmap = palettes.cubehelix_palette(as_cmap=True)
nt.assert_is_instance(cmap, mpl.colors.ListedColormap)
pal = palettes.cubehelix_palette()
x = np.linspace(0, 1, 6)
npt.assert_array_equal(cmap(x)[:, :3], pal)
cmap_rev = palettes.cubehelix_palette(as_cmap=True, reverse=True)
x = np.linspace(0, 1, 6)
pal_forward = cmap(x).tolist()
pal_reverse = cmap_rev(x[::-1]).tolist()
nt.assert_list_equal(pal_forward, pal_reverse)
def test_xkcd_palette(self):
names = list(xkcd_rgb.keys())[10:15]
colors = palettes.xkcd_palette(names)
for name, color in zip(names, colors):
as_hex = mpl.colors.rgb2hex(color)
nt.assert_equal(as_hex, xkcd_rgb[name])
def test_crayon_palette(self):
names = list(crayons.keys())[10:15]
colors = palettes.crayon_palette(names)
for name, color in zip(names, colors):
as_hex = mpl.colors.rgb2hex(color)
nt.assert_equal(as_hex, crayons[name].lower())
def test_color_codes(self):
palettes.set_color_codes("deep")
colors = palettes.color_palette("deep") + [".1"]
for code, color in zip("bgrmyck", colors):
rgb_want = mpl.colors.colorConverter.to_rgb(color)
rgb_got = mpl.colors.colorConverter.to_rgb(code)
nt.assert_equal(rgb_want, rgb_got)
palettes.set_color_codes("reset")
def test_as_hex(self):
pal = palettes.color_palette("deep")
for rgb, hex in zip(pal, pal.as_hex()):
nt.assert_equal(mpl.colors.rgb2hex(rgb), hex)
def test_preserved_palette_length(self):
pal_in = palettes.color_palette("Set1", 10)
pal_out = palettes.color_palette(pal_in)
nt.assert_equal(pal_in, pal_out)
def test_get_color_cycle(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
result = utils.get_color_cycle()
expected = mpl.rcParams['axes.color_cycle']
nt.assert_equal(result, expected)
| bsd-3-clause |
mjescobar/RF_Estimation | Utils/Scripts_Gratings/Process_Gratings.py | 1 | 7579 | import numpy as np
import matplotlib.pylab as plt
##125
### V2
value_E11a_v2=[0.0472727273,0.0113636364,0.0140909091,0.0477272727,0.0259090909,0.0186363636,0.0254545455,0.0227272727,0.0472727273]
value_E7a_v2=[0.1340909091,0.125,0.145,0.1086363636,0.0840909091,0.1022727273,0.1127272727,0.1186363636,0.1340909091]
value_E8a_v2=[0.03,0.0259090909,0.0109090909,0.0277272727,0.0318181818,0.0263636364,0.0204545455,0.0222727273,0.03]
value_F11a_v2=[0.1531818182,0.12,0.1309090909,0.1631818182,0.2240909091,0.155,0.1581818182,0.0913636364,0.1531818182]
value_F12a_v2=[0.0259090909,0.0254545455,0.035,0.0259090909,0.0209090909,0.0140909091,0.0563636364,0.0240909091,0.0259090909]
value_E7b_v2=[0.0318181818,0.0172727273,0.0077272727,0.0322727273,0.0345454545,0.0181818182,0.0168181818,0.0236363636,0.0318181818]
value_H10b_v2=[0.0540909091,0.025,0.0240909091,0.0063636364,0.0004545455,0.0031818182,0.0036363636,0.0095454545,0.0540909091]
value_K10b_v2=[0.0204545455,0.0140909091,0.0204545455,0.0195454545,0.0186363636,0.0181818182,0.0213636364,0.0113636364,0.0204545455]
value_K5a_v2=[0.0259090909,0.0213636364,0.0245454545,0.0327272727,0.0218181818,0.0145454545,0.0154545455,0.0372727273,0.0259090909]
value_L11a_v2=[0.1418181818,0.05,0.0277272727,0.0345454545,0.0213636364,0.0209090909,0.1568181818,0.1313636364,0.1418181818]
value_M14a_v2=[0.1140909091,0.12,0.0822727273,0.1031818182,0.1168181818,0.105,0.0904545455,0.0486363636,0.1140909091]
value_M9c_v2=[0.0336363636,0.0086363636,0.0154545455,0.0363636364,0.0768181818,0.0981818182,0.0413636364,0.01,0.0336363636]
### V4
value_E11a_v4=[20.0,8.18181818182,15.9090909091,16.3636363636,15.0,11.8181818182,12.2727272727,15.4545454545,20.0]
value_E7a_v4=[114.090909091,98.1818181818,101.363636364,124.545454545,111.818181818,76.8181818182,96.8181818182,112.727272727,114.090909091]
value_E8a_v4=[4.09090909091,4.54545454545,11.8181818182,9.54545454545,9.09090909091,4.54545454545,8.18181818182,10.0,4.09090909091]
value_F11a_v4=[100.909090909,60.4545454545,66.8181818182,95.9090909091,150.909090909,72.2727272727,62.2727272727,52.2727272727,100.909090909]
value_F12a_v4=[8.63636363636,11.8181818182,28.6363636364,10.9090909091,15.9090909091,4.54545454545,10.9090909091,17.2727272727,8.63636363636]
value_E7b_v4=[25.4545454545,14.5454545455,10.9090909091,4.54545454545,33.1818181818,5.90909090909,11.3636363636,13.6363636364,25.4545454545]
value_H10b_v4=[13.6363636364,20.9090909091,21.3636363636,7.72727272727,0.454545454545,1.36363636364,0.0,10.4545454545,13.6363636364]
value_K10b_v4=[0.0,4.54545454545,8.63636363636,1.81818181818,1.81818181818,5.45454545455,11.3636363636,13.1818181818,0.0]
value_K5a_v4=[13.6363636364,0.909090909091,10.4545454545,31.3636363636,10.9090909091,4.09090909091,5.90909090909,18.6363636364,13.6363636364]
value_L11a_v4=[18.1818181818,16.8181818182,6.81818181818,9.54545454545,3.63636363636,7.72727272727,58.1818181818,69.0909090909,18.1818181818]
value_M14a_v4=[27.7272727273,39.5454545455,32.7272727273,75.0,62.2727272727,53.1818181818,47.7272727273,66.3636363636,27.7272727273]
value_M9c_v4=[7.72727272727,0.454545454545,0.909090909091,8.63636363636,52.2727272727,88.1818181818,39.5454545455,0.454545454545,7.72727272727]
### V8
value_E11a_v8=[0,0.0004545455,0,0.0004545455,0,0,0.0004545455,0,0]
value_E7a_v8=[0.0736698347,0.1245454545,0.1113636364,0.1409090909,0.1413636364,0.1236363636,0.1309090909,0.1181818182,0.0736698347]
value_E8a_v8=[0.0004547521,0.0009090909,0.0009090909,0.0004545455,0.0018181818,0.0009090909,0,0.0009090909,0.0004547521]
value_F11a_v8=[0.0159163223,0.0168181818,0.0140909091,0.0204545455,0.0218181818,0.02,0.0168181818,0.0181818182,0.0159163223]
value_F12a_v8=[0,0.0018181818,0.005,0.0018181818,0.0036363636,0,0.0036363636,0.0022727273,0]
value_E7b_v8=[0.0009095041,0.0045454545,0.01,0.0022727273,0.005,0.0027272727,0.0054545455,0.0031818182,0.0009095041]
value_H10b_v8=[0,0.0022727273,0,0,0.0013636364,0.0004545455,0,0,0]
value_K10b_v8=[0,0,0,0,0,0,0,0,0]
value_K5a_v8=[0,0.0009090909,0,0.0072727273,0.0022727273,0,0,0,0]
value_L11a_v8=[0.0022737603,0.0136363636,0.0154545455,0.0068181818,0.0045454545,0.0159090909,0.03,0.0186363636,0.0022737603]
value_M14a_v8=[0.0113688017,0.0109090909,0.0145454545,0.0127272727,0.0113636364,0.0072727273,0.0168181818,0.0072727273,0.0113688017]
value_M9c_v8=[0.0009095041,0.0018181818,0.0004545455,0.0004545455,0.0027272727,0.005,0.0036363636,0,0.0009095041]
#250
### Plotting
angle = [0, 45, 90, 135, 180, 225, 270, 315, 0]
angle = np.array(angle)*np.pi/180
plt.figure('Preferred angles V2')
plt.polar(angle, value_E11a_v2, 'b', linewidth=3, label='E11a_v2')
plt.polar(angle, value_H10b_v2, 'g', linewidth=3, label='H10b_v2')
plt.polar(angle, value_K5a_v2, 'yellow', linewidth=3, label='K5a_v2')
plt.polar(angle, value_F12a_v2, 'r', linewidth=3, label='F12a_v2')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.figure('Preferred angles V8')
plt.polar(angle, value_E11a_v8, 'b', linewidth=3, label='E11a_v8')
plt.polar(angle, value_H10b_v8, 'g', linewidth=3, label='H10b_v8')
plt.polar(angle, value_K5a_v8, 'yellow', linewidth=3, label='K5a_v8')
#plt.polar(angle, value_F12a_v8, 'r', linewidth=3, label='F12a_v8')
#plt.polar(angle, value_E7b_v8, 'orange', linewidth=3, label='E7b_v8')
#plt.polar(angle, value_E8a_v8, 'black', linewidth=3, label='E8a_v8')
plt.polar(angle, value_K10b_v8, 'cyan', linewidth=3, label='K10b_v8')
#plt.polar(angle, value_F11a_v8, 'brown', linewidth=3, label='F11a_v8')
#plt.polar(angle, value_E7a_v8, 'lime', linewidth=3, label='E7a_v8')
#plt.polar(angle, value_M14a_v8, 'pink', linewidth=3, label='M14a_v8')
plt.polar(angle, value_M9c_v8, 'magenta', linewidth=3, label='M9c_v8')
#plt.polar(angle, value_L11a_v8, 'royalBlue', linewidth=3, label='L11a_v8')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.figure('V8 vs V2 : unit M9c preferred angle 225')
plt.subplot(211, polar=True)
plt.polar(angle, value_M9c_v8, 'r', linewidth=3, label='M9c_v8')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.subplot(212, polar=True)
plt.polar(angle, value_M9c_v2, 'b', linewidth=3, label='M9c_v2')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.figure('V8 _ not preferred')
plt.polar(angle, value_H10b_v8, 'g', linewidth=3, label='H10b_v8')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.figure('V8 greater firing rate')
plt.polar(angle, value_E7b_v8, 'orange', linewidth=3, label='E7b_v8')
#plt.polar(angle, value_F11a_v8, 'brown', linewidth=3, label='F11a_v8')
#plt.polar(angle, value_E7a_v8, 'lime', linewidth=3, label='E7a_v8')
plt.polar(angle, value_M14a_v8, 'pink', linewidth=3, label='M14a_v8')
plt.polar(angle, value_L11a_v8, 'royalBlue', linewidth=3, label='L11a_v8')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.figure('V2 _ Preferred regions???')
plt.polar(angle, value_E7b_v2, 'r', linewidth=3, label='E7b_v2')
plt.polar(angle, value_E8a_v2, 'b', linewidth=3, label='E8a_v2')
#plt.polar(angle, value_K10b_v2, 'g', linewidth=3, label='K10b_v2')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.figure('Preferred angles V2 more spikes')
plt.polar(angle, value_F11a_v2, 'b', linewidth=3, label='F11a_v2')
plt.polar(angle, value_E7a_v2, 'g', linewidth=3, label='E7a_v2')
plt.polar(angle, value_M14a_v2, 'r', linewidth=3, label='M14a_v2')
plt.polar(angle, value_M9c_v2, 'yellow', linewidth=3, label='M9c_v2')
plt.polar(angle, value_L11a_v2, 'royalBlue', linewidth=3, label='L11a_v2')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
| gpl-2.0 |
legacysurvey/obiwan | py/obiwan/qa/number_finished_by_slurm.py | 1 | 1248 | """
Given a qdo slurm log file, this computes the fraction of launched bricks
that resulted in a tractor table, out of the total number of launched bricks.
This is a useful test of scaling: more nodes with more time should create
more tractor catalogues than fewer nodes with less time.
"""
import pandas as pd
import os
import re
def add_fits(text):
return text+'.fits'
def trac_fns(slurm_fn):
"""Map the string 'logging to' to the tractor catalogue name that Should
be created for that job
"""
with open(slurm_fn,'r') as foo:
text= foo.read()
return (pd.Series(re.findall(r'Logging to:.*?\n',text))
.str.replace(r'Logging to:\s','')
.str.strip()
.str.replace('logs','tractor')
.str.replace(r'log\.','tractor-')
.apply(add_fits)
).values
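# Illustrative sketch of the mapping performed by trac_fns (the path below is
# made up): a log line such as
#   "Logging to: /scratch/obiwan/logs/123/log.1234p567"
# becomes
#   "/scratch/obiwan/tractor/123/tractor-1234p567.fits"
# i.e. 'logs' -> 'tractor', 'log.' -> 'tractor-', plus a '.fits' suffix.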
if __name__ == '__main__':
from argparse import ArgumentParser
parser= ArgumentParser()
parser.add_argument('--slurm_fn', type=str, required=True)
args = parser.parse_args()
tractor_fns= trac_fns(args.slurm_fn)
cnts= [1 if os.path.exists(fn) else 0
for fn in tractor_fns]
    print("1: tractor.fits exists, 0: doesn't")
print(pd.Series(cnts).value_counts())
| bsd-3-clause |
cogmission/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtkcairo.py | 69 | 2207 | """
GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
Author: Steve Chaplin
"""
import gtk
if gtk.pygtk_version < (2,7,0):
import cairo.gtk
from matplotlib.backends import backend_cairo
from matplotlib.backends.backend_gtk import *
backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \
'Pycairo(%s)' % backend_cairo.backend_version
_debug = False
#_debug = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if _debug: print 'backend_gtkcairo.%s()' % fn_name()
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTKCairo(thisFig)
return FigureManagerGTK(canvas, num)
class RendererGTKCairo (backend_cairo.RendererCairo):
if gtk.pygtk_version >= (2,7,0):
def set_pixmap (self, pixmap):
self.ctx = pixmap.cairo_create()
self.ctx.save() # restore, save - when call new_gc()
else:
def set_pixmap (self, pixmap):
self.ctx = cairo.gtk.gdk_cairo_create (pixmap)
self.ctx.save() # restore, save - when call new_gc()
class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)
def _renderer_init(self):
"""Override to use cairo (rather than GDK) renderer"""
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self._renderer = RendererGTKCairo (self.figure.dpi)
class FigureManagerGTKCairo(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKCairo (canvas, self.window)
else:
toolbar = None
return toolbar
class NavigationToolbar2Cairo(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKCairo(fig)
| agpl-3.0 |
ldirer/scikit-learn | sklearn/svm/tests/test_sparse.py | 63 | 13366 | import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import (assert_raises, assert_true, assert_false,
assert_warns, assert_raise_message,
ignore_warnings)
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
    # we use a subset of digits because iris, blobs or make_classification
    # didn't show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
    # Test decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
NMTHydro/Recharge | utils/ndvi_linear_interpolation.py | 1 | 11551 | # ===============================================================================
# Copyright 2016 gabe-parrish
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
import os
from scipy.interpolate import CubicSpline
import matplotlib.pyplot as plt
import numpy as np
import gdal
import seaborn
from jdcal import gcal2jd, jd2gcal
# ============= local library imports ===========================
def calculate_julian_date(year, julian_doy):
""""""
first_of_the_year = int(sum(gcal2jd(year, 1, 1)))
# print "first day jd", first_of_the_year
julian_date = first_of_the_year + int(julian_doy)
# print "full julian date", julian_date
return julian_date
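# Illustrative example: for year=2000 and julian_doy=150,
# int(sum(gcal2jd(2000, 1, 1))) is 2451544, so the returned full Julian date
# is 2451544 + 150 = 2451694.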
def format_date(date, year):
""""""
year_jd = int(sum(gcal2jd(year, 1, 1)))
# print "year 2000", year_2000
date = date - year_jd
d = jd2gcal(year_jd, date)
print "datedate", d
# test
for i in (1, 10, 100):
print "{num:02d}".format(num=i)
date_string = "{}_{a:02d}_{b:02d}".format(d[0], a=d[1], b=d[2])
return date_string
def read_files(file_list):
""""""
# erdas imagine driver..
driver = gdal.GetDriverByName('HFA')
# must register the driver before you can use it.
driver.Register()
# now we can open files
img_file_list = []
for img_file in file_list:
img_obj = gdal.Open(img_file)
if img_obj is None:
            print "Couldn't open " + img_file
gdal.sys.exit(1)
img_file_list.append(img_obj)
return img_file_list
def format_list(lst):
length = len(lst)
print "length of list", length
print "heres the list", lst
print "range {}".format(range(length))
tuple_list = []
for i in range(length):
if i < length-1:
tupper = (lst[i],lst[i+1])
tuple_list.append(tupper)
elif i == range(length):
tupper = (lst[i - 1], lst[i])
tuple_list.append(tupper)
print "tuple list {}".format(tuple_list)
return tuple_list
def findRasterIntersect(raster1, raster2):
# load data
band1 = raster1.GetRasterBand(1)
band2 = raster2.GetRasterBand(1)
gt1 = raster1.GetGeoTransform()
# print "here is geotransform 1 {}".format(gt1)
gt2 = raster2.GetGeoTransform()
# print "here is geotransform 1 {}".format(gt2)
# print "raster1.RasterXSize = {}".format(raster1.RasterXSize)
# print "raster1.RasterYSize = {}".format(raster1.RasterYSize)
# print "raster2.RasterXSize = {}".format(raster2.RasterXSize)
# print "raster2.RasterYSize = {}".format(raster2.RasterYSize)
# find each image's bounding box
# r1 has left, top, right, bottom of dataset's bounds in geospatial coordinates.
r1 = [gt1[0], gt1[3], gt1[0] + (gt1[1] * raster1.RasterXSize), gt1[3] + (gt1[5] * raster1.RasterYSize)]
r2 = [gt2[0], gt2[3], gt2[0] + (gt2[1] * raster2.RasterXSize), gt2[3] + (gt2[5] * raster2.RasterYSize)]
print '\t1 bounding box: %s' % str(r1)
print '\t2 bounding box: %s' % str(r2)
test_list = [r1[0], r2[0]]
# find intersection between bounding boxes
intersection = [max(test_list), min(r1[1], r2[1]), min(r1[2], r2[2]), max(r1[3], r2[3])]
if r1 != r2:
print '\t** different bounding boxes **'
# check for any overlap at all...
if (intersection[2] < intersection[0]) or (intersection[1] < intersection[3]):
intersection = None
print '\t*** no overlap ***'
return
else:
print '\tintersection:', intersection
left1 = int(round((intersection[0] - r1[0]) / gt1[1])) # difference divided by pixel dimension
top1 = int(round((intersection[1] - r1[1]) / gt1[5]))
col1 = int(round((intersection[2] - r1[0]) / gt1[1])) - left1 # difference minus offset left
row1 = int(round((intersection[3] - r1[1]) / gt1[5])) - top1
print "left 1: {}, top1: {}, col1: {}, row1: {}".format(left1, top1, col1, row1)
left2 = int(round((intersection[0] - r2[0]) / gt2[1])) # difference divided by pixel dimension
top2 = int(round((intersection[1] - r2[1]) / gt2[5]))
col2 = int(round((intersection[2] - r2[0]) / gt2[1])) - left2 # difference minus new left offset
row2 = int(round((intersection[3] - r2[1]) / gt2[5])) - top2
# print '\tcol1:',col1,'row1:',row1,'col2:',col2,'row2:',row2
if col1 != col2 or row1 != row2:
print "*** MEGA ERROR *** COLS and ROWS DO NOT MATCH ***"
# these arrays should now have the same spatial geometry though NaNs may differ
array1 = band1.ReadAsArray(left1, top1, col1, row1)
array2 = band2.ReadAsArray(left2, top2, col2, row2)
else: # same dimensions from the get go
print "same dimensions from the get go..."
col1 = raster1.RasterXSize # = col2
row1 = raster1.RasterYSize # = row2
array1 = band1.ReadAsArray()
array2 = band2.ReadAsArray()
return array1, array2, col1, row1, intersection
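# Worked example of the offset arithmetic above (numbers are made up): if
# raster1 starts at x=1000 with a 30 m pixel and the intersection starts at
# x=1300, then left1 = round((1300 - 1000) / 30) = 10, i.e. the shared window
# begins 10 columns into raster1; col1 and row1 are computed the same way from
# the far edge of the intersection minus that offset.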
def pull_files(path_to_files):
""""""
# TODO - somehow go back and make all the lists returned here be filled w tuples of current and next interpolation
# TODO - so then, we can iterate through each list in run_interpolation() and feed the function two rasters at a time.
jd_list = []
path_list = []
year_list = []
for p, dir, files in os.walk(path_to_files):
for i in files:
if i.endswith(".img"):
print "i", i
date = i[9:16]
year = date[:-3]
julian_doy = date[4:]
# print "date", date
# print "year", year
# print "Julian DOY", julian_doy
year_list.append(year)
julian_date = calculate_julian_date(year, julian_doy)
jd_list.append(julian_date)
file_path = os.path.join(p, i)
path_list.append(file_path)
# print "jd list", jd_list
# print "file path list", path_list
# diff = jd_list[1] - jd_list[0] # just checking that the difference between days of the year is correct...
# print "difference", diff
# return a list of arrays from gdal, or a list of tuples containing arrays,
raster_obj_list = read_files(path_list)
# print "obj list", raster_obj_list
jd_list = format_list(jd_list)
path_list = format_list(path_list)
raster_obj_list = format_list(raster_obj_list)
return jd_list, path_list, raster_obj_list, year_list
def get_arr(gdal_obj):
""""""
# get the imagine driver and register it
driver = gdal.GetDriverByName('HFA')
driver.Register()
# print "here is the gdal_obj", gdal_obj
band = gdal_obj.GetRasterBand(1)
data = band.ReadAsArray(0, 0)
# print "data", data
return data
def write_file(current_obj, arr, col, row, filename):
""""""
driver = current_obj.GetDriver()
driver.Register()
# TODO - Change for new output folder...
ds = driver.Create("/Volumes/Seagate Backup Plus Drive/all_dates_test/{}.img".format(filename), col, row, 1, gdal.GDT_Float32) #"" /Users/Gabe/Desktop/hard_drive_overflow/rapid_testfile_output/{}.img
ds.SetGeoTransform(current_obj.GetGeoTransform())
ds.SetProjection(current_obj.GetProjection())
ds_band = ds.GetRasterBand(1)
ds_band.WriteArray(arr)
def output_rasters(current_arr, next_arr, slope, start_date, end_date, date_count, current_obj, next_obj, year):
""""""
# get the driver from one of the objects
driver = current_obj.GetDriver()
driver.Register()
# geotransform = current_obj.GetGeoTransform()
col = current_obj.RasterXSize
row = current_obj.RasterYSize
# output the current arr to a file
print "START -> ndvi{}".format(start_date)
    # reformat the date string
date_string = format_date(start_date, year)
write_file(current_obj, current_arr, col, row, "NDVI{}".format(date_string))
# output all the in-between rasters to files
cnt = 1 # TODO - Check here again if problem
print "here's the range \n", range(start_date + 1, end_date)
for i in range(start_date + 1, end_date): # -1
interp_arr = np.add(current_arr, (slope * cnt))
print "Bout to write ndvi_{}".format(i)
        # reformat the date string
date_string = format_date(i, year)
write_file(current_obj, interp_arr, col, row, "NDVI{}".format(date_string))
print "wrote a file. Count: {}".format(cnt)
cnt += 1
# # output the next arr to a file
# todo fix the filename thing...
print "END -> ndvi_{}".format(end_date)
date_string = format_date(end_date, year)
write_file(current_obj, next_arr, col, row, "NDVI{}".format(date_string))
def interpolator(jd_list, path_list, raster_obj_list, year):
"""
    :param jd_list: (start, end) pair of full Julian dates
    :param path_list: (current, next) pair of raster file paths
    :param raster_obj_list: (current, next) pair of GDAL dataset objects
    :param year: year string used to build the output file names
    :return: None; the interpolated daily rasters are written to disk
"""
start_date = jd_list[0]
end_date = jd_list[-1]
# we need a total count of the number of days between our images.
date_count = end_date - start_date # + 1
print "date count", date_count
# this creates a range of every date between start and end.
date_range = range(start_date, end_date + 1)
# print "date range", date_range
current_obj = raster_obj_list[0]
next_obj = raster_obj_list[1]
current_arr = get_arr(current_obj)
# print "shape current arr", current_arr.shape
next_arr = get_arr(next_obj)
# print "shape next arr", next_arr.shape
diff = np.subtract(next_arr, current_arr)
slope = np.divide(diff, float(date_count))
# print "we got a slope, people!", slope
# Get the paths as well
current_path = path_list[0]
next_path = path_list[1]
output_rasters(current_arr, next_arr, slope, start_date, end_date, date_count, current_obj, next_obj, year)
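# Numerical sketch of the linear interpolation above (values are made up): for
# a pixel with current_arr value 0.2, next_arr value 0.5 and date_count = 3,
# slope = (0.5 - 0.2) / 3 = 0.1, so output_rasters writes 0.2, 0.3, 0.4, 0.5
# for the four consecutive days.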
def run_interpolator():
"""
This function is the master function that orchestrates reading of raster files into arrays, then passes them into an
interpolation function that interpolates the ndvi on a daily basis with a linear interpolation
:return:
"""
path_to_files = "/Users/Gabe/Desktop/hard_drive_overflow/METRIC_ETRM_Jornada_NDVI_P33R37" #METRIC_ETRM_Jornada_NDVI_P33R37"
jd_list, path_list, raster_obj_list, year_list = pull_files(path_to_files)
# # todo - have the array use findRasterIntersect here for each pair of rasters in the raster_obj_list
# # not sure if it's necessary yet to use it so...
# for i in raster_obj_list:
# findRasterIntersect(i[0], i[1])
print 'jd list \n', jd_list
# use the lists to run the interpolation.
for i, k, j, year in zip(jd_list, path_list, raster_obj_list, year_list):
interpolator(i, k, j, year)
if __name__ == "__main__":
run_interpolator()
| apache-2.0 |
0asa/scikit-learn | sklearn/feature_selection/rfe.py | 2 | 15171 | # Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import _check_cv as check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches when an `RFE` object is passed as an
argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]` corresponds to the \
ranking position of the i-th feature. Selected (i.e., estimated \
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params={}, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
if estimator.coef_.ndim > 1:
ranks = np.argsort(safe_sqr(estimator.coef_).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(estimator.coef_))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, support_], y)
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches when an `RFE` object is passed as an
argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
        The feature ranking, such that `ranking_[i]` corresponds to the
        ranking position of the i-th feature. Selected (i.e., estimated
        best) features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
`grid_scores_[i]` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of grid_scores_ is equal to (n_features + step - 2) // step + 1,
where step is the number of features removed at each iteration.
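    For example, with 10 features and step=3 this gives
    (10 + 3 - 2) // 3 + 1 = 4 cross-validation scores.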
Examples
--------
    The following example shows how to retrieve the 5 informative features,
    which are not known a priori, in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params={}, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
rfe = RFE(estimator=self.estimator, n_features_to_select=1,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
scores = np.zeros(X.shape[1])
n_features_to_select_by_rank = np.zeros(X.shape[1])
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
# Compute a full ranking of the features
# ranking_ contains the same set of values for all CV folds,
# but perhaps reordered
ranking_ = rfe.fit(X_train, y_train).ranking_
# Score each subset of features
for k in range(0, np.max(ranking_)):
indices = np.where(ranking_ <= k + 1)[0]
estimator = clone(self.estimator)
estimator.fit(X_train[:, indices], y_train)
score = _score(estimator, X_test[:, indices], y_test, scorer)
if self.verbose > 0:
print("Finished fold with %d / %d feature ranks, score=%f"
% (k + 1, np.max(ranking_), score))
scores[k] += score
# n_features_to_select_by_rank[k] is being overwritten
# multiple times, but by the same value
n_features_to_select_by_rank[k] = indices.size
# Select the best upper bound for feature rank. It's OK to use the
# last ranking_, as np.max(ranking_) is the same over all CV folds.
scores = scores[:np.max(ranking_)]
k = np.argmax(scores)
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select_by_rank[k],
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to len(cv) - 1
# here, the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
| bsd-3-clause |
parthea/pydatalab | tests/ml/dataset_tests.py | 3 | 3980 | # Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
import csv
import os
import pandas as pd
import random
import shutil
import tempfile
import google.datalab
from google.datalab.ml import CsvDataSet
class TestCsvDataSet(unittest.TestCase):
def test_schema(self):
json_schema = TestCsvDataSet._create_json_schema()
# CsvDataSet can take a json schema, a Schema object, or a string
ds = CsvDataSet(file_pattern='some/file', schema=json_schema)
self.assertEqual(json_schema, ds.schema)
schema_obj = google.datalab.bigquery.Schema(json_schema)
ds = CsvDataSet(file_pattern='some/file', schema=schema_obj)
self.assertEqual(json_schema, ds.schema)
schema_str = 'id: INTEGER, field1: STRING, field2: INTEGER'
ds = CsvDataSet(file_pattern='some/file', schema=schema_str)
self.assertEqual(json_schema, ds.schema)
def test_sample(self):
tmp_dir = tempfile.mkdtemp()
try:
json_schema = TestCsvDataSet._create_json_schema()
all_rows = TestCsvDataSet._create_csv_files(tmp_dir, 'data', 3)
ds = CsvDataSet(file_pattern=os.path.join(tmp_dir, 'data*'), schema=json_schema)
df = ds.sample(5)
self.assertEqual(5, len(df))
self.assertTrue(isinstance(df, pd.DataFrame))
self.assertEqual(5, len(set(df['id'].tolist()))) # 5 unique rows.
# check the 5 samples below to the csv files by checking they are in
# all_rows
for _, row in df.iterrows():
row_index = row['id']
self.assertEqual(all_rows.iloc[row_index]['field1'], row['field1'])
self.assertEqual(all_rows.iloc[row_index]['field2'], row['field2'])
df = ds.sample(3 * 5)
self.assertEqual(3 * 5, len(df))
self.assertTrue(isinstance(df, pd.DataFrame))
self.assertEqual(3 * 5, len(set(df['id'].tolist()))) # 15 unique rows.
with self.assertRaises(ValueError):
df = ds.sample(3 * 5 + 1) # sample is larger than data size
finally:
shutil.rmtree(tmp_dir)
@staticmethod
def _create_csv_files(folder, filename, num_files):
"""Makes csv data files.
    Makes files of the form:
folder/filename1.csv,
folder/filename2.csv,
...
folder/filename{num_files}.csv
Each file will have 5 random csv rows.
Args:
folder: output folder
filename: filename prefix
num_files: how many files to make
Returns:
A pandas dataframe containing all the csv rows where the id is the index
row.
"""
ex_id = 0
dfs = []
for i in range(1, num_files + 1):
full_file_name = os.path.join(folder, filename + str(i) + '.csv')
with open(full_file_name, 'w') as f:
writer = csv.writer(f)
for r in range(5):
writer.writerow([ex_id,
random.choice(['red', 'blue', 'green']),
random.randint(0, 100)])
ex_id += 1
dfs.append(pd.read_csv(
full_file_name,
names=['id', 'field1', 'field2'],
index_col='id',
header=None))
return pd.concat(dfs, axis=0, ignore_index=False)
@staticmethod
def _create_json_schema():
return [{'name': 'id', 'type': 'INTEGER'}, # unique id
{'name': 'field1', 'type': 'STRING'}, # random string
{'name': 'field2', 'type': 'INTEGER'}] # random int
| apache-2.0 |
lodemo/CATANA | src/visualization/database.py | 1 | 6195 | # -*- coding: utf-8 -*-
from sqlalchemy import Column, ForeignKey, Integer, BigInteger, String, Table, UnicodeText, Unicode, Boolean, LargeBinary, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy import UniqueConstraint
from sqlalchemy.orm import sessionmaker
from contextlib import contextmanager
import json
from os import path #posixpath
Base = declarative_base()
'''
''
' Table Definitions
''
'''
class Channel(Base):
__tablename__ = 'channel'
id = Column(String(24), primary_key=True) # string may not work, use other then
title = Column(Unicode(100), nullable=False)
keywords = Column(UnicodeText, nullable=False) #a space-separated list of strings.
description = Column(UnicodeText) # max 1000chars
dateAdded = Column(String(24), nullable=False)
uploadsPlaylistID = Column(String(24), nullable=False)
latestUploadsIDs = Column(String(750), nullable=False) # json list string
unsubscribedTrailer = Column(String(11), nullable=False)
topicIds = Column(String(131))
network = Column(String(42)) # for later addition
crawlTimestamp = Column(String(20), nullable=False)
thumbnailUrl = Column(String(300))
history = relationship('ChannelHistory', cascade='delete')
videos = relationship('Video', cascade='delete')
featured = relationship('FeaturedChannel', cascade='delete')
def __repr__(self):
return u'<Channel (name=|%s|)>' % self.title
class FeaturedChannel(Base):
__tablename__ = 'channel_featured'
id = Column(Integer, primary_key=True)
channelID = Column(String(24), ForeignKey('channel.id'))
featuredChannelID = Column(String(24))
class ChannelHistory(Base):
__tablename__ = 'channel_history'
id = Column(Integer, primary_key=True)
channelID = Column(String(24), ForeignKey('channel.id'))
viewCount = Column(BigInteger, nullable=False)
subscriberCount = Column(Integer, nullable=False)
commentCount = Column(Integer, nullable=False)
videoCount = Column(Integer, nullable=False)
crawlTimestamp = Column(String(20), nullable=False)
class Video(Base):
__tablename__ = 'video'
id = Column(String(11), primary_key=True) # string may not work, use other then
channelID = Column(String(24), ForeignKey('channel.id'))
title = Column(Unicode(300), nullable=False)
description = Column(UnicodeText, nullable=False) # max ~5000 characters actually
category = Column(Integer, nullable=False)
dateAdded = Column(String(24), nullable=False)
tags = Column(Unicode(750), nullable=False) # max 500 characters
topicIds = Column(String(131))
attribution = Column(String(42)) # for later network attribution
duration = Column(String(20), nullable=False)
crawlTimestamp = Column(String(20), nullable=False) # datetime type
deleted = Column(Boolean)
history = relationship('VideoHistory')
feature = relationship('VideoFeatures', cascade='delete')
class VideoHistory(Base):
__tablename__ = 'video_history'
id = Column(Integer, primary_key=True)
videoID = Column(String(11), ForeignKey('video.id'))
viewCount = Column(Integer, nullable=False)
commentCount = Column(Integer, nullable=False)
likeCount = Column(Integer, nullable=False)
dislikeCount = Column(Integer, nullable=False)
crawlTimestamp = Column(String(20), nullable=False)
class VideoFeatures(Base):
__tablename__ = 'video_features'
id = Column(Integer, primary_key=True)
videoID = Column(String(11), ForeignKey('video.id'))
feature = Column(LargeBinary) # correct datatype for numpy/pandas array? test
duration = Column(Float) # correct datatype for numpy/pandas array? test
cluster = relationship('VideoFaceCluster', cascade='delete')
class VideoFaceCluster(Base):
__tablename__ = 'video_face_cluster'
id = Column(Integer, primary_key=True)
featureID = Column(Integer, ForeignKey('video_features.id'))
cluster = Column(Integer)
class VideoFeatureQueue(Base):
__tablename__ = 'video_feature_queue'
id = Column(String(11), primary_key=True)
state = Column(String(9))
'''
''
' Database API class
''
'''
class YTDatabase(object):
#DATA_DIR = '/../../../data/'
#DB_FILE = 'ytDatabase.db'
DB_NAME = 'X'
DB_USER = 'X'
DB_PW = 'X'
DB_HOST = '127.0.0.1'
DB_PORT = '3306'
def __init__(self):
#DB_PATH = path.join(self.DATA_DIR, self.DB_FILE)
#self.engine = create_engine('sqlite://'+DB_PATH, encoding='utf-8', convert_unicode=True)
# This engine just used to query for list of databases
mysql_engine = create_engine('mysql+mysqldb://{0}:{1}@{2}:{3}'.format(self.DB_USER, self.DB_PW, self.DB_HOST, self.DB_PORT), encoding='utf-8', convert_unicode=True)
# Query for existing databases
mysql_engine.execute("CREATE DATABASE IF NOT EXISTS {0} ".format(self.DB_NAME))
# Go ahead and use this engine
self.engine = create_engine('mysql+mysqldb://{0}:{1}@{2}:{3}/{4}?charset=utf8mb4'.format(self.DB_USER, self.DB_PW, self.DB_HOST, self.DB_PORT, self.DB_NAME), encoding='utf-8', convert_unicode=True)
Base.metadata.bind = self.engine
self.DBSession = sessionmaker(bind = self.engine)
self.createDatabase()
self.DBSession().execute("SET NAMES utf8mb4 COLLATE 'utf8mb4_unicode_ci'")
self.DBSession().execute("SET CHARACTER SET utf8mb4")
def createDatabase(self, drop=False, update=False):
if drop:
Base.metadata.drop_all()
        if not self.engine.table_names(): # checks if no tables exist
Base.metadata.create_all()
elif update:
Base.metadata.create_all()
@contextmanager
def _session_scope(self, commit=False):
"""Provide a transactional scope around a series of operations."""
session = self.DBSession()
try:
yield session
if commit:
session.commit()
except:
session.rollback()
raise
finally:
session.close()
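# Hypothetical usage of the context manager above (the attribute values are
# assumptions for illustration, not taken from the original project):
#
#   db = YTDatabase()
#   with db._session_scope(commit=True) as session:
#       session.add(Channel(id='UC1234567890abcdefghijkl', title=u'Example',
#                           keywords=u'', dateAdded='2017-01-01T00:00:00Z',
#                           uploadsPlaylistID='UU1234567890abcdefghijkl',
#                           latestUploadsIDs='[]', unsubscribedTrailer='',
#                           crawlTimestamp='2017-01-01 00:00:00'))
#
# Commit, rollback and close are handled by _session_scope itself.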
| mit |
potash/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 75 | 1647 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1); that is, the third feature is completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
| bsd-3-clause |
rgommers/statsmodels | statsmodels/distributions/mixture_rvs.py | 27 | 9592 | from statsmodels.compat.python import range
import numpy as np
def _make_index(prob,size):
"""
Returns a boolean index for given probabilities.
Notes
    -----
prob = [.75,.25] means that there is a 75% chance of the first column
being True and a 25% chance of the second column being True. The
columns are mutually exclusive.
"""
rv = np.random.uniform(size=(size,1))
cumprob = np.cumsum(prob)
return np.logical_and(np.r_[0,cumprob[:-1]] <= rv, rv < cumprob)
def mixture_rvs(prob, size, dist, kwargs=None):
"""
Sample from a mixture of distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
>>> Y = mixture_rvs(prob, 5000, dist=[stats.norm, stats.norm], kwargs =
(dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty(size)
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
sample[sample_idx] = dist[i].rvs(*args, **dict(loc=loc,scale=scale,
size=sample_size))
return sample
class MixtureDistribution(object):
'''univariate mixture distribution
for simple case for now (unbound support)
does not yet inherit from scipy.stats.distributions
adding pdf to mixture_rvs, some restrictions on broadcasting
Currently it does not hold any state, all arguments included in each method.
'''
#def __init__(self, prob, size, dist, kwargs=None):
def rvs(self, prob, size, dist, kwargs=None):
return mixture_rvs(prob, size, dist, kwargs=kwargs)
def pdf(self, x, prob, dist, kwargs=None):
"""
pdf a mixture of distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
Say we want 5000 random variables from mixture of normals with two
distributions norm(-1,.5) and norm(1,.5) and we want to sample from the
first with probability .75 and the second with probability .25.
>>> from scipy import stats
>>> prob = [.75,.25]
>>> Y = mixture.pdf(x, prob, dist=[stats.norm, stats.norm], kwargs =
(dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
pdf_ = prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
else:
pdf_ += prob[i] * dist[i].pdf(x, *args, loc=loc, scale=scale)
return pdf_
def cdf(self, x, prob, dist, kwargs=None):
"""
cdf of a mixture of distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
dist : array-like
An iterable of distributions objects from scipy.stats.
kwargs : tuple of dicts, optional
A tuple of dicts. Each dict in kwargs can have keys loc, scale, and
args to be passed to the respective distribution in dist. If not
provided, the distribution defaults are used.
Examples
--------
        Say we want to evaluate the cdf of a mixture of two normals,
        norm(-1, .5) and norm(1, .5), where the first component has weight .75
        and the second has weight .25.
        >>> from scipy import stats
        >>> prob = [.75,.25]
        >>> mix = MixtureDistribution()
        >>> cdf_x = mix.cdf(x, prob, dist=[stats.norm, stats.norm],
        ...                 kwargs=(dict(loc=-1, scale=.5), dict(loc=1, scale=.5)))
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
for i in range(len(prob)):
loc = kwargs[i].get('loc',0)
scale = kwargs[i].get('scale',1)
args = kwargs[i].get('args',())
if i == 0: #assume all broadcast the same as the first dist
cdf_ = prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
else:
cdf_ += prob[i] * dist[i].cdf(x, *args, loc=loc, scale=scale)
return cdf_
def mv_mixture_rvs(prob, size, dist, nvars, **kwargs):
"""
Sample from a mixture of multivariate distributions.
Parameters
----------
prob : array-like
Probability of sampling from each distribution in dist
size : int
The length of the returned sample.
dist : array-like
An iterable of distributions instances with callable method rvs.
    nvars : int
        dimension of the multivariate distribution; could be inferred instead
kwargs : tuple of dicts, optional
ignored
Examples
--------
Say we want 2000 random variables from mixture of normals with two
multivariate normal distributions, and we want to sample from the
first with probability .4 and the second with probability .6.
import statsmodels.sandbox.distributions.mv_normal as mvd
cov3 = np.array([[ 1. , 0.5 , 0.75],
[ 0.5 , 1.5 , 0.6 ],
[ 0.75, 0.6 , 2. ]])
mu = np.array([-1, 0.0, 2.0])
mu2 = np.array([4, 2.0, 2.0])
mvn3 = mvd.MVNormal(mu, cov3)
mvn32 = mvd.MVNormal(mu2, cov3/2., 4)
rvs = mix.mv_mixture_rvs([0.4, 0.6], 2000, [mvn3, mvn32], 3)
"""
if len(prob) != len(dist):
raise ValueError("You must provide as many probabilities as distributions")
if not np.allclose(np.sum(prob), 1):
raise ValueError("prob does not sum to 1")
if kwargs is None:
kwargs = ({},)*len(prob)
idx = _make_index(prob,size)
sample = np.empty((size, nvars))
for i in range(len(prob)):
sample_idx = idx[...,i]
sample_size = sample_idx.sum()
#loc = kwargs[i].get('loc',0)
#scale = kwargs[i].get('scale',1)
#args = kwargs[i].get('args',())
# use int to avoid numpy bug with np.random.multivariate_normal
sample[sample_idx] = dist[i].rvs(size=int(sample_size))
return sample
if __name__ == '__main__':
from scipy import stats
obs_dist = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.beta],
kwargs=(dict(loc=-1,scale=.5),dict(loc=1,scale=1,args=(1,.5))))
nobs = 10000
mix = MixtureDistribution()
## mrvs = mixture_rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
## kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.75)))
mix_kwds = (dict(loc=-1,scale=.25),dict(loc=1,scale=.75))
mrvs = mix.rvs([1/3.,2/3.], size=nobs, dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
grid = np.linspace(-4,4, 100)
mpdf = mix.pdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
mcdf = mix.cdf(grid, [1/3.,2/3.], dist=[stats.norm, stats.norm],
kwargs=mix_kwds)
doplot = 1
if doplot:
import matplotlib.pyplot as plt
plt.figure()
plt.hist(mrvs, bins=50, normed=True, color='red')
plt.title('histogram of sample and pdf')
plt.plot(grid, mpdf, lw=2, color='black')
plt.figure()
plt.hist(mrvs, bins=50, normed=True, cumulative=True, color='red')
plt.title('histogram of sample and pdf')
plt.plot(grid, mcdf, lw=2, color='black')
plt.show()
| bsd-3-clause |
mne-tools/mne-python | mne/viz/tests/test_ica.py | 4 | 15646 | # Authors: Denis Engemann <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: Simplified BSD
import os.path as op
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
import pytest
import matplotlib.pyplot as plt
from mne import (read_events, Epochs, read_cov, pick_types, Annotations,
make_fixed_length_events)
from mne.fixes import _close_event
from mne.io import read_raw_fif
from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
from mne.utils import requires_sklearn, _click_ch_name, catch_logging
from mne.viz.ica import _create_properties_layout, plot_ica_properties
from mne.viz.utils import _fake_click
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.2
raw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
def _get_raw(preload=False):
"""Get raw data."""
return read_raw_fif(raw_fname, preload=preload)
def _get_events():
"""Get events."""
return read_events(event_name)
def _get_picks(raw):
"""Get picks."""
return [0, 1, 2, 6, 7, 8, 12, 13, 14] # take a only few channels
def _get_epochs():
"""Get epochs."""
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
with pytest.warns(RuntimeWarning, match='projection'):
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks)
return epochs
@requires_sklearn
def test_plot_ica_components():
"""Test plotting of ICA solutions."""
res = 8
fast_test = {"res": res, "contours": 0, "sensors": False}
raw = _get_raw()
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2)
ica_picks = _get_picks(raw)
with pytest.warns(RuntimeWarning, match='projection'):
ica.fit(raw, picks=ica_picks)
for components in [0, [0], [0, 1], [0, 1] * 2, None]:
ica.plot_components(components, image_interp='bilinear',
colorbar=True, **fast_test)
plt.close('all')
# test interactive mode (passing 'inst' arg)
with catch_logging() as log:
ica.plot_components([0, 1], image_interp='bilinear', inst=raw, res=16,
verbose='debug', ch_type='grad')
log = log.getvalue()
assert 'grad data' in log
assert 'Interpolation mode local to mean' in log
fig = plt.gcf()
# test title click
# ----------------
lbl = fig.axes[1].get_label()
ica_idx = int(lbl[-3:])
titles = [ax.title for ax in fig.axes]
title_pos_midpoint = (titles[1].get_window_extent().extents
.reshape((2, 2)).mean(axis=0))
# first click adds to exclude
_fake_click(fig, fig.axes[1], title_pos_midpoint, xform='pix')
assert ica_idx in ica.exclude
# clicking again removes from exclude
_fake_click(fig, fig.axes[1], title_pos_midpoint, xform='pix')
assert ica_idx not in ica.exclude
# test topo click
# ---------------
_fake_click(fig, fig.axes[1], (0., 0.), xform='data')
c_fig = plt.gcf()
labels = [ax.get_label() for ax in c_fig.axes]
for label in ['topomap', 'image', 'erp', 'spectrum', 'variance']:
assert label in labels
topomap_ax = c_fig.axes[labels.index('topomap')]
title = topomap_ax.get_title()
assert (lbl == title)
ica.info = None
with pytest.raises(RuntimeError, match='fit the ICA'):
ica.plot_components(1, ch_type='mag')
@pytest.mark.slowtest
@requires_sklearn
def test_plot_ica_properties():
"""Test plotting of ICA properties."""
raw = _get_raw(preload=True).crop(0, 5)
raw.add_proj([], remove_existing=True)
raw.info['highpass'] = 1.0 # fake high-pass filtering
events = make_fixed_length_events(raw)
picks = _get_picks(raw)[:6]
pick_names = [raw.ch_names[k] for k in picks]
raw.pick_channels(pick_names)
reject = dict(grad=4000e-13, mag=4e-12)
epochs = Epochs(raw, events[:3], event_id, tmin, tmax,
baseline=(None, 0), preload=True)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2, max_iter=1,
random_state=0)
with pytest.warns(RuntimeWarning, match='projection'):
ica.fit(raw)
# test _create_properties_layout
fig, ax = _create_properties_layout()
assert_equal(len(ax), 5)
with pytest.raises(ValueError, match='specify both fig and figsize'):
_create_properties_layout(figsize=(2, 2), fig=fig)
topoargs = dict(topomap_args={'res': 4, 'contours': 0, "sensors": False})
with catch_logging() as log:
ica.plot_properties(raw, picks=0, verbose='debug', **topoargs)
log = log.getvalue()
assert raw.ch_names[0] == 'MEG 0113'
assert 'Interpolation mode local to mean' in log, log
ica.plot_properties(epochs, picks=1, dB=False, plot_std=1.5, **topoargs)
ica.plot_properties(epochs, picks=1, image_args={'sigma': 1.5},
topomap_args={'res': 4, 'colorbar': True},
psd_args={'fmax': 65.}, plot_std=False,
figsize=[4.5, 4.5], reject=reject)
plt.close('all')
with pytest.raises(TypeError, match='must be an instance'):
ica.plot_properties(epochs, dB=list('abc'))
with pytest.raises(TypeError, match='must be an instance'):
ica.plot_properties(ica)
with pytest.raises(TypeError, match='must be an instance'):
ica.plot_properties([0.2])
with pytest.raises(TypeError, match='must be an instance'):
plot_ica_properties(epochs, epochs)
with pytest.raises(TypeError, match='must be an instance'):
ica.plot_properties(epochs, psd_args='not dict')
with pytest.raises(TypeError, match='must be an instance'):
ica.plot_properties(epochs, plot_std=[])
fig, ax = plt.subplots(2, 3)
ax = ax.ravel()[:-1]
ica.plot_properties(epochs, picks=1, axes=ax, **topoargs)
pytest.raises(TypeError, plot_ica_properties, epochs, ica, picks=[0, 1],
axes=ax)
pytest.raises(ValueError, ica.plot_properties, epochs, axes='not axes')
plt.close('all')
# Test merging grads.
pick_names = raw.ch_names[:15:2] + raw.ch_names[1:15:2]
raw = _get_raw(preload=True).pick_channels(pick_names).crop(0, 5)
raw.info.normalize_proj()
ica = ICA(random_state=0, max_iter=1)
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(raw)
ica.plot_properties(raw)
plt.close('all')
# Test handling of zeros
ica = ICA(random_state=0, max_iter=1)
epochs.pick_channels(pick_names)
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(epochs)
epochs._data[0] = 0
with pytest.warns(None): # Usually UserWarning: Infinite value .* for epo
ica.plot_properties(epochs, **topoargs)
plt.close('all')
# Test Raw with annotations
annot = Annotations(onset=[1], duration=[1], description=['BAD'])
raw_annot = _get_raw(preload=True).set_annotations(annot).crop(0, 8)
raw_annot.pick(np.arange(10))
raw_annot.del_proj()
with pytest.warns(UserWarning, match='did not converge'):
ica.fit(raw_annot)
# drop bad data segments
fig = ica.plot_properties(raw_annot, picks=[0, 1], **topoargs)
assert_equal(len(fig), 2)
# don't drop
ica.plot_properties(raw_annot, reject_by_annotation=False, **topoargs)
@requires_sklearn
def test_plot_ica_sources():
"""Test plotting of ICA panel."""
raw = read_raw_fif(raw_fname).crop(0, 1).load_data()
picks = _get_picks(raw)
epochs = _get_epochs()
raw.pick_channels([raw.ch_names[k] for k in picks])
ica_picks = pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
ica = ICA(n_components=2)
ica.fit(raw, picks=ica_picks)
ica.exclude = [1]
fig = ica.plot_sources(raw)
assert len(plt.get_fignums()) == 1
# change which component is in ICA.exclude (click data trace to remove
# current one; click name to add other one)
fig.canvas.draw()
x = fig.mne.traces[1].get_xdata()[5]
y = fig.mne.traces[1].get_ydata()[5]
_fake_click(fig, fig.mne.ax_main, (x, y), xform='data') # exclude = []
_click_ch_name(fig, ch_index=0, button=1) # exclude = [0]
fig.canvas.key_press_event(fig.mne.close_key)
_close_event(fig)
assert len(plt.get_fignums()) == 0
assert_array_equal(ica.exclude, [0])
# test when picks does not include ica.exclude.
fig = ica.plot_sources(raw, picks=[1])
assert len(plt.get_fignums()) == 1
plt.close('all')
# dtype can change int->np.int64 after load, test it explicitly
ica.n_components_ = np.int64(ica.n_components_)
# test clicks on y-label (need >2 secs for plot_properties() to work)
long_raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
fig = ica.plot_sources(long_raw)
assert len(plt.get_fignums()) == 1
fig.canvas.draw()
_click_ch_name(fig, ch_index=0, button=3)
assert len(fig.mne.child_figs) == 1
assert len(plt.get_fignums()) == 2
# close child fig directly (workaround for mpl issue #18609)
fig.mne.child_figs[0].canvas.key_press_event('escape')
assert len(plt.get_fignums()) == 1
fig.canvas.key_press_event(fig.mne.close_key)
assert len(plt.get_fignums()) == 0
del long_raw
# test with annotations
orig_annot = raw.annotations
raw.set_annotations(Annotations([0.2], [0.1], 'Test'))
fig = ica.plot_sources(raw)
assert len(fig.mne.ax_main.collections) == 1
assert len(fig.mne.ax_hscroll.collections) == 1
raw.set_annotations(orig_annot)
# test error handling
raw.info['bads'] = ['MEG 0113']
with pytest.raises(RuntimeError, match="Raw doesn't match fitted data"):
ica.plot_sources(inst=raw)
epochs.info['bads'] = ['MEG 0113']
with pytest.raises(RuntimeError, match="Epochs don't match fitted data"):
ica.plot_sources(inst=epochs)
epochs.info['bads'] = []
# test w/ epochs and evokeds
ica.plot_sources(epochs)
ica.plot_sources(epochs.average())
evoked = epochs.average()
fig = ica.plot_sources(evoked)
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax,
[line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax,
[ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
# plot with bad channels excluded
ica.exclude = [0]
ica.plot_sources(evoked)
ica.labels_ = dict(eog=[0])
ica.labels_['eog/0/crazy-channel'] = [0]
ica.plot_sources(evoked) # now with labels
with pytest.raises(ValueError, match='must be of Raw or Epochs type'):
ica.plot_sources('meeow')
@pytest.mark.slowtest
@requires_sklearn
def test_plot_ica_overlay():
"""Test plotting of ICA cleaning."""
raw = _get_raw(preload=True)
raw.info['highpass'] = 1.0 # fake high-pass filtering
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2, random_state=0)
# can't use info.normalize_proj here because of how and when ICA and Epochs
# objects do picking of Raw data
with pytest.warns(RuntimeWarning, match='projection'):
ica.fit(raw, picks=picks)
# don't test raw, needs preload ...
with pytest.warns(RuntimeWarning, match='projection'):
ecg_epochs = create_ecg_epochs(raw, picks=picks)
ica.plot_overlay(ecg_epochs.average())
with pytest.warns(RuntimeWarning, match='projection'):
eog_epochs = create_eog_epochs(raw, picks=picks)
ica.plot_overlay(eog_epochs.average(), n_pca_components=2)
pytest.raises(TypeError, ica.plot_overlay, raw[:2, :3][0])
pytest.raises(TypeError, ica.plot_overlay, raw, exclude=2)
ica.plot_overlay(raw)
plt.close('all')
# smoke test for CTF
raw = read_raw_fif(raw_ctf_fname)
raw.apply_gradient_compensation(3)
raw.info['highpass'] = 1.0 # fake high-pass filtering
picks = pick_types(raw.info, meg=True, ref_meg=False)
ica = ICA(n_components=2, )
ica.fit(raw, picks=picks)
with pytest.warns(RuntimeWarning, match='longer than'):
ecg_epochs = create_ecg_epochs(raw)
ica.plot_overlay(ecg_epochs.average())
def _get_geometry(fig):
try:
return fig.axes[0].get_subplotspec().get_geometry() # pragma: no cover
except AttributeError: # MPL < 3.4 (probably)
return fig.axes[0].get_geometry() # pragma: no cover
@requires_sklearn
def test_plot_ica_scores():
"""Test plotting of ICA scores."""
raw = _get_raw()
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2)
with pytest.warns(RuntimeWarning, match='projection'):
ica.fit(raw, picks=picks)
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], figsize=(6.4, 2.7))
ica.plot_scores([[0.3, 0.2], [0.3, 0.2]], axhline=[0.1, -0.1])
# check labels
ica.labels_ = dict()
ica.labels_['eog'] = 0
ica.labels_['ecg'] = 1
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='eog')
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='ecg')
ica.labels_['eog/0/foo'] = 0
ica.labels_['ecg/1/bar'] = 0
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='foo')
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='eog')
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='ecg')
# check setting number of columns
fig = ica.plot_scores([[0.3, 0.2], [0.3, 0.2], [0.3, 0.2]],
axhline=[0.1, -0.1])
assert 2 == _get_geometry(fig)[1]
fig = ica.plot_scores([[0.3, 0.2], [0.3, 0.2]], axhline=[0.1, -0.1],
n_cols=1)
assert 1 == _get_geometry(fig)[1]
# only use 1 column (even though 2 were requested)
fig = ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], n_cols=2)
assert 1 == _get_geometry(fig)[1]
with pytest.raises(ValueError, match='Need as many'):
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1],
labels=['one', 'one-too-many'])
with pytest.raises(ValueError, match='The length of'):
ica.plot_scores([0.2])
@requires_sklearn
def test_plot_instance_components():
"""Test plotting of components as instances of raw and epochs."""
raw = _get_raw()
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2)
with pytest.warns(RuntimeWarning, match='projection'):
ica.fit(raw, picks=picks)
ica.exclude = [0]
fig = ica.plot_sources(raw, title='Components')
keys = ('home', 'home', 'end', 'down', 'up', 'right', 'left', '-', '+',
'=', 'd', 'd', 'pageup', 'pagedown', 'z', 'z', 's', 's', 'f11',
'b')
for key in keys:
fig.canvas.key_press_event(key)
ax = fig.mne.ax_main
line = ax.lines[0]
_fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]],
'data')
_fake_click(fig, ax, [-0.1, 0.9]) # click on y-label
fig.canvas.key_press_event('escape')
plt.close('all')
epochs = _get_epochs()
fig = ica.plot_sources(epochs, title='Components')
for key in keys:
fig.canvas.key_press_event(key)
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax, [-0.1, 0.9]) # click on y-label
fig.canvas.key_press_event('escape')
| bsd-3-clause |
sunshinelover/chanlun | vn.trader/ctaAlgo/ctaBacktesting.py | 1 | 34592 | # encoding: UTF-8
'''
This file contains the backtesting engine of the CTA module. The backtesting
engine exposes the same API as the live CTA engine, so the same strategy code
can be used for both backtesting and live trading.
'''
from __future__ import division
from datetime import datetime, timedelta
from collections import OrderedDict
from itertools import product
import pymongo
from ctaBase import *
from ctaSetting import *
from vtConstant import *
from vtGateway import VtOrderData, VtTradeData
from vtFunction import loadMongoSetting
########################################################################
class BacktestingEngine(object):
"""
    CTA backtesting engine.
    The method interface is kept identical to the strategy engine, so the
    same strategy code can be used for both backtesting and live trading.
"""
TICK_MODE = 'tick'
BAR_MODE = 'bar'
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
        # counter for local stop-order IDs
        self.stopOrderCount = 0
        # stopOrderID = STOPORDERPREFIX + str(stopOrderCount)
        # local stop-order dictionaries
        # key is the stopOrderID, value is the StopOrder object
        self.stopOrderDict = {}             # cancelled stop orders are NOT removed from this dict
        self.workingStopOrderDict = {}      # cancelled stop orders ARE removed from this dict
        # engine type: backtesting
        self.engineType = ENGINETYPE_BACKTESTING
        # backtesting settings
        self.strategy = None        # strategy instance being backtested
        self.mode = self.BAR_MODE   # backtesting mode, bar data by default
        self.slippage = 0           # slippage assumed in the backtest
        self.rate = 0               # commission rate assumed in the backtest (percentage-based)
        self.size = 1               # contract size, default 1
        self.dbClient = None        # database client
        self.dbCursor = None        # database cursor
        #self.historyData = []      # list of historical data for backtesting
        self.initData = []          # data used for strategy initialization
        #self.backtestingData = []  # data used for the backtest itself
        self.dbName = ''            # name of the backtesting database
        self.symbol = ''            # name of the backtesting collection
        self.dataStartDate = None       # start date of the backtest data, datetime object
        self.dataEndDate = None         # end date of the backtest data, datetime object
        self.strategyStartDate = None   # strategy start date (earlier data is used for initialization), datetime object
        self.limitOrderDict = OrderedDict()         # dict of all limit orders
        self.workingLimitOrderDict = OrderedDict()  # dict of active limit orders, used for matching
        self.limitOrderCount = 0                    # limit-order ID counter
        self.tradeCount = 0             # trade ID counter
        self.tradeDict = OrderedDict()  # dict of trades
        self.logList = []               # log records
        # latest market data, used to simulate fills
        self.tick = None
        self.bar = None
        self.dt = None      # latest timestamp
#----------------------------------------------------------------------
def setStartDate(self, startDate='20100416', initDays=10):
"""设置回测的启动日期"""
self.dataStartDate = datetime.strptime(startDate, '%Y%m%d')
initTimeDelta = timedelta(initDays)
self.strategyStartDate = self.dataStartDate + initTimeDelta
#----------------------------------------------------------------------
def setEndDate(self, endDate=''):
"""设置回测的结束日期"""
if endDate:
self.dataEndDate= datetime.strptime(endDate, '%Y%m%d')
#----------------------------------------------------------------------
def setBacktestingMode(self, mode):
"""设置回测模式"""
self.mode = mode
#----------------------------------------------------------------------
def setDatabase(self, dbName, symbol):
"""设置历史数据所用的数据库"""
self.dbName = dbName
self.symbol = symbol
#----------------------------------------------------------------------
def loadHistoryData(self):
"""载入历史数据"""
host, port = loadMongoSetting()
self.dbClient = pymongo.MongoClient(host, port)
collection = self.dbClient[self.dbName][self.symbol]
self.output(u'开始载入数据')
# 首先根据回测模式,确认要使用的数据类
if self.mode == self.BAR_MODE:
dataClass = CtaBarData
func = self.newBar
else:
dataClass = CtaTickData
func = self.newTick
# 载入初始化需要用的数据
flt = {'datetime':{'$gte':self.dataStartDate,
'$lt':self.strategyStartDate}}
initCursor = collection.find(flt)
# 将数据从查询指针中读取出,并生成列表
for d in initCursor:
data = dataClass()
data.__dict__ = d
self.initData.append(data)
# 载入回测数据
if not self.dataEndDate:
flt = {'datetime':{'$gte':self.strategyStartDate}} # 数据过滤条件
else:
flt = {'datetime':{'$gte':self.strategyStartDate,
'$lte':self.dataEndDate}}
self.dbCursor = collection.find(flt)
self.output(u'载入完成,数据量:%s' %(initCursor.count() + self.dbCursor.count()))
#----------------------------------------------------------------------
def runBacktesting(self):
"""运行回测"""
# 载入历史数据
self.loadHistoryData()
# 首先根据回测模式,确认要使用的数据类
if self.mode == self.BAR_MODE:
dataClass = CtaBarData
func = self.newBar
else:
dataClass = CtaTickData
func = self.newTick
self.output(u'开始回测')
self.strategy.inited = True
self.strategy.onInit()
self.output(u'策略初始化完成')
self.strategy.trading = True
self.strategy.onStart()
self.output(u'策略启动完成')
self.output(u'开始回放数据')
for d in self.dbCursor:
data = dataClass()
data.__dict__ = d
func(data)
self.output(u'数据回放结束')
#----------------------------------------------------------------------
def newBar(self, bar):
"""新的K线"""
self.bar = bar
self.dt = bar.datetime
self.crossLimitOrder() # 先撮合限价单
self.crossStopOrder() # 再撮合停止单
self.strategy.onBar(bar) # 推送K线到策略中
#----------------------------------------------------------------------
def newTick(self, tick):
"""新的Tick"""
self.tick = tick
self.dt = tick.datetime
self.crossLimitOrder()
self.crossStopOrder()
self.strategy.onTick(tick)
#----------------------------------------------------------------------
def initStrategy(self, strategyClass, setting=None):
"""
        Initialize the strategy.
        setting is the parameter dict of the strategy; if omitted, the default
        settings defined in the strategy class are used.
"""
self.strategy = strategyClass(self, setting)
self.strategy.name = self.strategy.className
#----------------------------------------------------------------------
def sendOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发单"""
self.limitOrderCount += 1
orderID = str(self.limitOrderCount)
order = VtOrderData()
order.vtSymbol = vtSymbol
order.price = price
order.totalVolume = volume
        order.status = STATUS_NOTTRADED     # just submitted, not yet filled
order.orderID = orderID
order.vtOrderID = orderID
order.orderTime = str(self.dt)
        # map CTA order types to direction/offset
if orderType == CTAORDER_BUY:
order.direction = DIRECTION_LONG
order.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
order.direction = DIRECTION_SHORT
order.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
order.direction = DIRECTION_SHORT
order.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
order.direction = DIRECTION_LONG
order.offset = OFFSET_CLOSE
        # save the order into the limit-order dictionaries
self.workingLimitOrderDict[orderID] = order
self.limitOrderDict[orderID] = order
return orderID
#----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
"""撤单"""
if vtOrderID in self.workingLimitOrderDict:
order = self.workingLimitOrderDict[vtOrderID]
order.status = STATUS_CANCELLED
order.cancelTime = str(self.dt)
del self.workingLimitOrderDict[vtOrderID]
#----------------------------------------------------------------------
def sendStopOrder(self, vtSymbol, orderType, price, volume, strategy):
"""发停止单(本地实现)"""
self.stopOrderCount += 1
stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)
so = StopOrder()
so.vtSymbol = vtSymbol
so.price = price
so.volume = volume
so.strategy = strategy
so.stopOrderID = stopOrderID
so.status = STOPORDER_WAITING
if orderType == CTAORDER_BUY:
so.direction = DIRECTION_LONG
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_SELL:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
so.direction = DIRECTION_SHORT
so.offset = OFFSET_OPEN
elif orderType == CTAORDER_COVER:
so.direction = DIRECTION_LONG
so.offset = OFFSET_CLOSE
        # save the StopOrder object into the dictionaries
self.stopOrderDict[stopOrderID] = so
self.workingStopOrderDict[stopOrderID] = so
return stopOrderID
#----------------------------------------------------------------------
def cancelStopOrder(self, stopOrderID):
"""撤销停止单"""
# 检查停止单是否存在
if stopOrderID in self.workingStopOrderDict:
so = self.workingStopOrderDict[stopOrderID]
so.status = STOPORDER_CANCELLED
del self.workingStopOrderDict[stopOrderID]
#----------------------------------------------------------------------
def crossLimitOrder(self):
"""基于最新数据撮合限价单"""
# 先确定会撮合成交的价格
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.low # 若买入方向限价单价格高于该价格,则会成交
sellCrossPrice = self.bar.high # 若卖出方向限价单价格低于该价格,则会成交
buyBestCrossPrice = self.bar.open # 在当前时间点前发出的买入委托可能的最优成交价
sellBestCrossPrice = self.bar.open # 在当前时间点前发出的卖出委托可能的最优成交价
else:
buyCrossPrice = self.tick.askPrice1
sellCrossPrice = self.tick.bidPrice1
buyBestCrossPrice = self.tick.askPrice1
sellBestCrossPrice = self.tick.bidPrice1
# 遍历限价单字典中的所有限价单
for orderID, order in self.workingLimitOrderDict.items():
# 判断是否会成交
buyCross = order.direction==DIRECTION_LONG and order.price>=buyCrossPrice
sellCross = order.direction==DIRECTION_SHORT and order.price<=sellCrossPrice
# 如果发生了成交
if buyCross or sellCross:
# 推送成交数据
self.tradeCount += 1 # 成交编号自增1
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = order.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
trade.orderID = order.orderID
trade.vtOrderID = order.orderID
trade.direction = order.direction
trade.offset = order.offset
# 以买入为例:
# 1. 假设当根K线的OHLC分别为:100, 125, 90, 110
# 2. 假设在上一根K线结束(也是当前K线开始)的时刻,策略发出的委托为限价105
# 3. 则在实际中的成交价会是100而不是105,因为委托发出时市场的最优价格是100
if buyCross:
trade.price = min(order.price, buyBestCrossPrice)
self.strategy.pos += order.totalVolume
else:
trade.price = max(order.price, sellBestCrossPrice)
self.strategy.pos -= order.totalVolume
trade.volume = order.totalVolume
trade.tradeTime = str(self.dt)
trade.dt = self.dt
self.strategy.onTrade(trade)
self.tradeDict[tradeID] = trade
                # push the order data
order.tradedVolume = order.totalVolume
order.status = STATUS_ALLTRADED
self.strategy.onOrder(order)
                # remove the limit order from the working dict
del self.workingLimitOrderDict[orderID]
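    # Worked example of the fill rule above (illustrative numbers, added for
    # clarity): with a bar of open=100, high=125, low=90, close=110,
    #   - a buy limit at 105 crosses (105 >= low 90) and fills at
    #     min(105, open 100) = 100;
    #   - a sell limit at 120 crosses (120 <= high 125) and fills at
    #     max(120, open 100) = 120.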
#----------------------------------------------------------------------
def crossStopOrder(self):
"""基于最新数据撮合停止单"""
# 先确定会撮合成交的价格,这里和限价单规则相反
if self.mode == self.BAR_MODE:
buyCrossPrice = self.bar.high # 若买入方向停止单价格低于该价格,则会成交
sellCrossPrice = self.bar.low # 若卖出方向限价单价格高于该价格,则会成交
bestCrossPrice = self.bar.open # 最优成交价,买入停止单不能低于,卖出停止单不能高于
else:
buyCrossPrice = self.tick.lastPrice
sellCrossPrice = self.tick.lastPrice
bestCrossPrice = self.tick.lastPrice
# 遍历停止单字典中的所有停止单
for stopOrderID, so in self.workingStopOrderDict.items():
# 判断是否会成交
buyCross = so.direction==DIRECTION_LONG and so.price<=buyCrossPrice
sellCross = so.direction==DIRECTION_SHORT and so.price>=sellCrossPrice
# 如果发生了成交
if buyCross or sellCross:
# 推送成交数据
self.tradeCount += 1 # 成交编号自增1
tradeID = str(self.tradeCount)
trade = VtTradeData()
trade.vtSymbol = so.vtSymbol
trade.tradeID = tradeID
trade.vtTradeID = tradeID
if buyCross:
self.strategy.pos += so.volume
trade.price = max(bestCrossPrice, so.price)
else:
self.strategy.pos -= so.volume
trade.price = min(bestCrossPrice, so.price)
self.limitOrderCount += 1
orderID = str(self.limitOrderCount)
trade.orderID = orderID
trade.vtOrderID = orderID
trade.direction = so.direction
trade.offset = so.offset
trade.volume = so.volume
trade.tradeTime = str(self.dt)
trade.dt = self.dt
self.strategy.onTrade(trade)
self.tradeDict[tradeID] = trade
                # push the order data
so.status = STOPORDER_TRIGGERED
order = VtOrderData()
order.vtSymbol = so.vtSymbol
order.symbol = so.vtSymbol
order.orderID = orderID
order.vtOrderID = orderID
order.direction = so.direction
order.offset = so.offset
order.price = so.price
order.totalVolume = so.volume
order.tradedVolume = so.volume
order.status = STATUS_ALLTRADED
order.orderTime = trade.tradeTime
self.strategy.onOrder(order)
self.limitOrderDict[orderID] = order
                # remove the stop order from the working dict
del self.workingStopOrderDict[stopOrderID]
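    # Worked example of the stop-order rule above (illustrative numbers, added
    # for clarity): with the same bar open=100, high=125, low=90,
    #   - a buy stop at 105 triggers (105 <= high 125) and fills at
    #     max(open 100, 105) = 105;
    #   - a sell stop at 95 triggers (95 >= low 90) and fills at
    #     min(open 100, 95) = 95.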
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
"""考虑到回测中不允许向数据库插入数据,防止实盘交易中的一些代码出错"""
pass
#----------------------------------------------------------------------
def loadBar(self, dbName, collectionName, startDate):
"""直接返回初始化数据列表中的Bar"""
return self.initData
#----------------------------------------------------------------------
def loadTick(self, dbName, collectionName, startDate):
"""直接返回初始化数据列表中的Tick"""
return self.initData
#----------------------------------------------------------------------
def writeCtaLog(self, content):
"""记录日志"""
log = str(self.dt) + ' ' + content
self.logList.append(log)
#----------------------------------------------------------------------
def output(self, content):
"""输出内容"""
print str(datetime.now()) + "\t" + content
#----------------------------------------------------------------------
def calculateBacktestingResult(self):
"""
        Compute the backtesting results.
        """
        self.output(u'Computing backtesting results')
        # first, compute the P&L of each round-trip trade from the trade records
        resultList = []             # list of trading results
        longTrade = []              # long trades not yet closed
        shortTrade = []             # short trades not yet closed
for trade in self.tradeDict.values():
            # long trade
            if trade.direction == DIRECTION_LONG:
                # if there is no open short trade yet
                if not shortTrade:
                    longTrade.append(trade)
                # otherwise this long trade closes an existing short
                else:
                    while True:
                        entryTrade = shortTrade[0]
                        exitTrade = trade
                        # settle the matched open/close volume
                        closedVolume = min(exitTrade.volume, entryTrade.volume)
                        result = TradingResult(entryTrade.price, entryTrade.dt,
                                               exitTrade.price, exitTrade.dt,
                                               -closedVolume, self.rate, self.slippage, self.size)
                        resultList.append(result)
                        # compute the unsettled remainder
                        entryTrade.volume -= closedVolume
                        exitTrade.volume -= closedVolume
                        # if the opening trade is fully settled, remove it from the list
                        if not entryTrade.volume:
                            shortTrade.pop(0)
                        # if the closing trade is fully settled, exit the loop
                        if not exitTrade.volume:
                            break
                        # if the closing trade is not fully settled,
                        if exitTrade.volume:
                            # and all opening trades have been settled, the remainder of the
                            # closing trade becomes a new opposite-direction opening trade
                            if not shortTrade:
                                longTrade.append(exitTrade)
                                break
                            # if opening trades remain, continue with the next loop iteration
                            else:
                                pass
            # short trade
            else:
                # if there is no open long trade yet
                if not longTrade:
                    shortTrade.append(trade)
                # otherwise this short trade closes an existing long
                else:
                    while True:
                        entryTrade = longTrade[0]
                        exitTrade = trade
                        # settle the matched open/close volume
                        closedVolume = min(exitTrade.volume, entryTrade.volume)
                        result = TradingResult(entryTrade.price, entryTrade.dt,
                                               exitTrade.price, exitTrade.dt,
                                               closedVolume, self.rate, self.slippage, self.size)
                        resultList.append(result)
                        # compute the unsettled remainder
                        entryTrade.volume -= closedVolume
                        exitTrade.volume -= closedVolume
                        # if the opening trade is fully settled, remove it from the list
                        if not entryTrade.volume:
                            longTrade.pop(0)
                        # if the closing trade is fully settled, exit the loop
                        if not exitTrade.volume:
                            break
                        # if the closing trade is not fully settled,
                        if exitTrade.volume:
                            # and all opening trades have been settled, the remainder of the
                            # closing trade becomes a new opposite-direction opening trade
                            if not longTrade:
                                shortTrade.append(exitTrade)
                                break
                            # if opening trades remain, continue with the next loop iteration
                            else:
                                pass
        # check whether there are any trades
        if not resultList:
            self.output(u'No trading results')
            return {}
        # then, based on the per-trade results, compute the equity curve, max drawdown, etc.
        capital = 0             # cumulative P&L
        maxCapital = 0          # highest cumulative P&L so far
        drawdown = 0            # drawdown
        totalResult = 0         # total number of round-trip trades
        totalTurnover = 0       # total turnover (contract face value)
        totalCommission = 0     # total commission
        totalSlippage = 0       # total slippage
        timeList = []           # time series
        pnlList = []            # per-trade P&L series
        capitalList = []        # cumulative P&L series
        drawdownList = []       # drawdown series
        winningResult = 0       # number of winning trades
        losingResult = 0        # number of losing trades
        totalWinning = 0        # total profit of winning trades
        totalLosing = 0         # total loss of losing trades
for result in resultList:
capital += result.pnl
maxCapital = max(capital, maxCapital)
drawdown = capital - maxCapital
pnlList.append(result.pnl)
            timeList.append(result.exitDt)      # use the exit time as the trade timestamp
capitalList.append(capital)
drawdownList.append(drawdown)
totalResult += 1
totalTurnover += result.turnover
totalCommission += result.commission
totalSlippage += result.slippage
if result.pnl >= 0:
winningResult += 1
totalWinning += result.pnl
else:
losingResult += 1
totalLosing += result.pnl
        # compute the P&L statistics (guard against division by zero when there are
        # no winning or no losing trades)
        winningRate = winningResult/totalResult*100                                 # win rate
        averageWinning = totalWinning/winningResult if winningResult else 0         # average profit per winning trade
        averageLosing = totalLosing/losingResult if losingResult else 0             # average loss per losing trade
        profitLossRatio = -averageWinning/averageLosing if averageLosing else 0     # profit/loss ratio
        # assemble and return the backtesting results
d = {}
d['capital'] = capital
d['maxCapital'] = maxCapital
d['drawdown'] = drawdown
d['totalResult'] = totalResult
d['totalTurnover'] = totalTurnover
d['totalCommission'] = totalCommission
d['totalSlippage'] = totalSlippage
d['timeList'] = timeList
d['pnlList'] = pnlList
d['capitalList'] = capitalList
d['drawdownList'] = drawdownList
d['winningRate'] = winningRate
d['averageWinning'] = averageWinning
d['averageLosing'] = averageLosing
d['profitLossRatio'] = profitLossRatio
return d
#----------------------------------------------------------------------
def showBacktestingResult(self):
"""显示回测结果"""
d = self.calculateBacktestingResult()
# 输出
self.output('-' * 30)
self.output(u'第一笔交易:\t%s' % d['timeList'][0])
self.output(u'最后一笔交易:\t%s' % d['timeList'][-1])
self.output(u'总交易次数:\t%s' % formatNumber(d['totalResult']))
self.output(u'总盈亏:\t%s' % formatNumber(d['capital']))
self.output(u'最大回撤: \t%s' % formatNumber(min(d['drawdownList'])))
self.output(u'平均每笔盈利:\t%s' %formatNumber(d['capital']/d['totalResult']))
self.output(u'平均每笔滑点:\t%s' %formatNumber(d['totalSlippage']/d['totalResult']))
self.output(u'平均每笔佣金:\t%s' %formatNumber(d['totalCommission']/d['totalResult']))
self.output(u'胜率\t\t%s%%' %formatNumber(d['winningRate']))
self.output(u'平均每笔盈利\t%s' %formatNumber(d['averageWinning']))
self.output(u'平均每笔亏损\t%s' %formatNumber(d['averageLosing']))
self.output(u'盈亏比:\t%s' %formatNumber(d['profitLossRatio']))
# 绘图
import matplotlib.pyplot as plt
pCapital = plt.subplot(3, 1, 1)
pCapital.set_ylabel("capital")
pCapital.plot(d['capitalList'])
pDD = plt.subplot(3, 1, 2)
pDD.set_ylabel("DD")
pDD.bar(range(len(d['drawdownList'])), d['drawdownList'])
pPnl = plt.subplot(3, 1, 3)
pPnl.set_ylabel("pnl")
pPnl.hist(d['pnlList'], bins=50)
plt.show()
#----------------------------------------------------------------------
def putStrategyEvent(self, name):
"""发送策略更新事件,回测中忽略"""
pass
#----------------------------------------------------------------------
def setSlippage(self, slippage):
"""设置滑点点数"""
self.slippage = slippage
#----------------------------------------------------------------------
def setSize(self, size):
"""设置合约大小"""
self.size = size
#----------------------------------------------------------------------
def setRate(self, rate):
"""设置佣金比例"""
self.rate = rate
#----------------------------------------------------------------------
def runOptimization(self, strategyClass, optimizationSetting):
"""优化参数"""
# 获取优化设置
settingList = optimizationSetting.generateSetting()
targetName = optimizationSetting.optimizeTarget
# 检查参数设置问题
if not settingList or not targetName:
self.output(u'优化设置有问题,请检查')
# 遍历优化
resultList = []
for setting in settingList:
self.clearBacktestingResult()
self.output('-' * 30)
self.output('setting: %s' %str(setting))
self.initStrategy(strategyClass, setting)
self.runBacktesting()
d = self.calculateBacktestingResult()
try:
targetValue = d[targetName]
except KeyError:
targetValue = 0
resultList.append(([str(setting)], targetValue))
# 显示结果
resultList.sort(reverse=True, key=lambda result:result[1])
self.output('-' * 30)
self.output(u'优化结果:')
for result in resultList:
self.output(u'%s: %s' %(result[0], result[1]))
#----------------------------------------------------------------------
def clearBacktestingResult(self):
"""清空之前回测的结果"""
# 清空限价单相关
self.limitOrderCount = 0
self.limitOrderDict.clear()
self.workingLimitOrderDict.clear()
# 清空停止单相关
self.stopOrderCount = 0
self.stopOrderDict.clear()
self.workingStopOrderDict.clear()
# 清空成交相关
self.tradeCount = 0
self.tradeDict.clear()
########################################################################
class TradingResult(object):
"""每笔交易的结果"""
#----------------------------------------------------------------------
def __init__(self, entryPrice, entryDt, exitPrice,
exitDt, volume, rate, slippage, size):
"""Constructor"""
        self.entryPrice = entryPrice    # entry price
        self.exitPrice = exitPrice      # exit price
        self.entryDt = entryDt          # entry time, datetime
        self.exitDt = exitDt            # exit time
        self.volume = volume            # traded volume (+/- indicates direction)
        self.turnover = (self.entryPrice+self.exitPrice)*size*abs(volume)  # turnover
        self.commission = self.turnover*rate                               # commission cost
        self.slippage = slippage*2*size*abs(volume)                        # slippage cost
        self.pnl = ((self.exitPrice - self.entryPrice) * volume * size
                    - self.commission - self.slippage)                     # net P&L
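# Worked example of the P&L arithmetic above (illustrative numbers, added for
# clarity): a long trade entered at 3000 and exited at 3010 with volume=1,
# size=300, rate=0.3/10000 and slippage=0.2 gives
#   turnover   = (3000 + 3010) * 300 * 1                = 1,803,000
#   commission = 1,803,000 * 0.00003                    = 54.09
#   slippage   = 0.2 * 2 * 300 * 1                      = 120
#   pnl        = (3010 - 3000) * 1 * 300 - 54.09 - 120  = 2825.91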
########################################################################
class OptimizationSetting(object):
"""优化设置"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
self.paramDict = OrderedDict()
        self.optimizeTarget = ''        # name of the field to optimize
#----------------------------------------------------------------------
def addParameter(self, name, start, end, step):
"""增加优化参数"""
if end <= start:
print u'参数起始点必须小于终止点'
return
if step <= 0:
print u'参数布进必须大于0'
return
l = []
param = start
while param <= end:
l.append(param)
param += step
self.paramDict[name] = l
#----------------------------------------------------------------------
def generateSetting(self):
"""生成优化参数组合"""
# 参数名的列表
nameList = self.paramDict.keys()
paramList = self.paramDict.values()
# 使用迭代工具生产参数对组合
productList = list(product(*paramList))
# 把参数对组合打包到一个个字典组成的列表中
settingList = []
for p in productList:
d = dict(zip(nameList, p))
settingList.append(d)
return settingList
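    # Example of the combination logic above (illustrative, added for clarity):
    # after
    #   setting.addParameter('fast', 5, 10, 5)    ->  [5, 10]
    #   setting.addParameter('slow', 20, 30, 10)  ->  [20, 30]
    # generateSetting() returns the cross product
    #   [{'fast': 5, 'slow': 20}, {'fast': 5, 'slow': 30},
    #    {'fast': 10, 'slow': 20}, {'fast': 10, 'slow': 30}]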
#----------------------------------------------------------------------
def setOptimizeTarget(self, target):
"""设置优化目标字段"""
self.optimizeTarget = target
#----------------------------------------------------------------------
def formatNumber(n):
"""格式化数字到字符串"""
n = round(n, 2) # 保留两位小数
return format(n, ',') # 加上千分符
if __name__ == '__main__':
    # The following is a demo backtesting script; adapt it to your own needs.
    # Running the backtest from an IPython notebook or Spyder is recommended,
    # but it can also be run line by line from the command line.
    from ctaDemo import *
    # create the backtesting engine
    engine = BacktestingEngine()
    # set the engine to bar-based backtesting mode
    engine.setBacktestingMode(engine.BAR_MODE)
    # set the start date of the backtest data
    engine.setStartDate('20110101')
    # load the historical data into the engine
    engine.setDatabase(MINUTE_DB_NAME, 'IF0000')
    # set the product-related parameters
    engine.setSlippage(0.2)     # one tick of the index future
    engine.setRate(0.3/10000)   # commission of 0.3 per 10,000
    engine.setSize(300)         # contract multiplier of the index future
    # create the strategy object inside the engine
    engine.initStrategy(DoubleEmaDemo, {})
    # run the backtest
    engine.runBacktesting()
    # display the backtesting results
    # when run from Spyder or an IPython notebook, the P&L charts pop up;
    # when run directly from the command line, only the summary numbers are printed
engine.showBacktestingResult()
| mit |
ljwolf/spvcm | spvcm/tests/test_trace.py | 1 | 5417 | import numpy as np
import pandas as pd
from spvcm.abstracts import Hashmap, Trace
import unittest as ut
from spvcm._constants import RTOL, ATOL
import os
FULL_PATH = os.path.dirname(os.path.abspath(__file__))
class Test_Trace(ut.TestCase):
def setUp(self):
self.a = {chr(i+97):list(range(10)) for i in range(5)}
self.t = Trace(**self.a)
self.mt = Trace(self.a,self.a,self.a)
self.real_mt = Trace.from_csv(FULL_PATH + r'/data/south_mvcm_5000', multi=True)
self.real_singles = [Trace.from_csv(FULL_PATH + r'/data/south_mvcm_5000_{}.csv'
.format(i)) for i in range(4)]
def test_validate_names(self):
b = self.a.copy()
try:
bad_names = Trace(self.a,b,self.a,self.a)
except KeyError:
pass
# tests
def test_slicing(self):
t = self.t
mt = self.mt
#1index
assert t[1] == {'a':1, 'b':1, 'c':1, 'd':1, 'e':1}
assert mt[6] == [{k:6 for k in ['a','b', 'c', 'd', 'e']}]*3
assert t[-1] =={k:9 for k in ['a','b', 'c', 'd', 'e']}
assert mt[-1] == [{k:9 for k in ['a','b', 'c', 'd', 'e']}]*3
assert t[2:5] == {k:list(range(2,5)) for k in ['a','b', 'c', 'd', 'e']}
assert mt[8:] == [ {k:list(range(8,10)) for k in ['a','b', 'c', 'd', 'e'] }] * 3
assert t[-4::2] == {k:[6,8] for k in ['a','b', 'c', 'd', 'e']}
assert (t['a'] == list(range(10))).all()
assert (mt['a'] == [list(range(10))]*3).all()
assert t[['a','b']] == {'a':list(range(10)), 'b':list(range(10))}
assert mt[['a','b']] == [{'a':list(range(10)), 'b':list(range(10))}]*3
#2index
assert t['a', 1] == 1
assert t[['a', 'b'], 1] == {'a':1, 'b':1}
assert (mt['e', 5] == [5]*3).all()
assert mt[['d', 'e'], 8:] == [{'d':[8,9], 'e':[8,9]}]*3
assert (t[0, 'a'] == list(range(10))).all()
assert t[0, ['a', 'b']] == {'a':list(range(10)), 'b':list(range(10))}
try:
t[1, ['a','c']]
raise Exception('This did not raise an exception within the slicer!')
except IndexError:
pass
assert mt[1:, ['a','c']] == [{'a':list(range(10)), 'c':list(range(10))}] * 2
assert (mt[2, 'a'] == list(range(10))).all()
assert t[0,-1] == {k:9 for k in ['a', 'b', 'c', 'd', 'e']}
assert t[0,:] == {k:list(range(10)) for k in ['a', 'b', 'c', 'd', 'e']}
assert mt[:, -1:-4:-1] == [{k:[9,8,7] for k in ['a', 'b', 'c', 'd', 'e']}]*3
#3index
assert t[0, 'a', -1] == 9
assert t[0, ['a','b'],-3::2] == {'a':[7,9], 'b':[7,9]}
assert t[0, : ,-1] == {k:9 for k in ['a','b','c','d','e']}
try:
t[1, 'a', -1]
raise Exception('this did not raise an exception when it should have')
except IndexError:
pass
assert (mt[1:, 'a', -1] == [9]*2).all()
assert mt[1:, ['a','b'], -2:] == [{'a':[8,9], 'b':[8,9]}]*2
assert (mt[2, 'a', 5::2] == [5,7,9]).all()
assert (mt[1:, 'a', -5::2] == [[5,7,9]]*2).all()
assert (mt[:, 'a', -5::2] == [[5,7,9]]*3).all()
assert mt[2, :, :] == {k:list(range(10)) for k in ['a','b','c','d','e']}
assert mt[:,:,:] == mt.chains
assert mt[:,:,:] is not mt.chains
def test_to_df(self):
df = self.t.to_df()
df2 = pd.DataFrame.from_dict(self.t.chains[0])
np.testing.assert_array_equal(df.values, df2.values)
mtdf = self.mt.to_df()
mtdf2 = [pd.DataFrame.from_dict(chain) for chain in self.mt.chains]
for i in range(len(mtdf2)):
np.testing.assert_array_equal(mtdf[i].values, mtdf2[i].values)
def test_from_df(self):
df = self.t.to_df()
new_trace = Trace.from_df(df)
assert new_trace == self.t
new_mt = Trace.from_df((df, df, df))
assert new_mt == self.t
def test_to_csv(self):
df = self.t.to_df()
self.t.to_csv('./test_to_csv.csv')
new_df = pd.read_csv('./test_to_csv.csv')
np.testing.assert_allclose(df.values, new_df.values,
rtol=RTOL, atol=ATOL)
os.remove('./test_to_csv.csv')
def test_from_csv(self):
self.t.to_csv('./test_from_csv.csv')
new_t = Trace.from_csv('./test_from_csv.csv')
assert self.t == new_t
os.remove('./test_from_csv.csv')
def test_single_roundtrips(self):
source_from_file = self.real_singles[0]
from_df = Trace.from_df(source_from_file.to_df())
source_from_file._assert_allclose(from_df)
def test_ordering(self):
for ch, alone in zip(self.real_mt.chains, self.real_singles):
Trace(ch)._assert_allclose(alone)
def test_multi_roundtrips(self):
dfs = self.real_mt.to_df()
new = Trace.from_df(dfs)
new._assert_allclose(self.real_mt)
@ut.skip
def test_from_pymc3(self):
raise NotImplementedError
@ut.skip
def test_plot(self):
try:
import matplotlib as mpl
mpl.use('Agg')
self.t.plot()
except:
raise Exception('Single trace plotting failed!')
try:
import matplotlib as mpl
mpl.use('Agg')
self.mt.plot()
except:
raise Exception('Multi-chain trace plotting failed!')
| mit |
ky822/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
gclenaghan/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
deepesch/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve data contiguity, unlike
            # [:, np.newaxis], which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
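# Illustrative usage sketch (not part of the original module): fitting a
# DecisionTreeClassifier with class_weight="balanced" on imbalanced toy data
# and inspecting the quantities documented above. The toy data is made up.
if __name__ == "__main__":
    _rng = np.random.RandomState(0)
    _X = _rng.randn(200, 4)
    _y = (_rng.rand(200) < 0.2).astype(int)  # roughly 80% class 0, 20% class 1

    _clf = DecisionTreeClassifier(class_weight="balanced", max_depth=3,
                                  random_state=0)
    _clf.fit(_X, _y)

    # predict_proba is the (weighted) fraction of training samples of each
    # class in the leaf that each row falls into
    print(_clf.predict_proba(_X[:3]))
    # apply returns the index of that leaf
    print(_clf.apply(_X[:3]))
    # normalized total impurity reduction contributed by each feature
    print(_clf.feature_importances_)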
| bsd-3-clause |
AtsushiHashimoto/fujino_mthesis | tools/flowgraph/mst/generate_label.py | 1 | 6546 | # _*_ coding: utf-8 -*-
import os
import re
import io
import csv
import copy
import json
import argparse
import pandas as pd
def get_dataframe_of_flowgraph(flow_path):
"""
input: flow_graph_path
output: pandas dataframe
"""
ID = 0
POSITION = 1
    ids = [] # IDs are not always numeric.
data = []
with open(flow_path, 'rt') as fin:
reader = csv.reader(fin, delimiter=',')
next(reader) # skip header
for row in reader:
id = row[ID]
ids.append(id)
step, sentence, word_no = row[POSITION].split('-')
data.append([int(float(step)), int(float(sentence)), int(float(word_no))] + row[POSITION+1:])
flow_df = pd.DataFrame(data)
flow_df.index = ids
flow_df.columns = ['step', 'sentence', 'word_no', 'NEtype', 'rNE', 'enter_edges']
return flow_df
def get_ingredients(file_path):
"""
input: ingredients_path
output: a list of ingredients
"""
INGREDIENT = 1
r = re.compile(ur'[0-9A-Za-zぁ-んァ-ン一-龥ー]+')
ingredients = []
with open(file_path, 'rt') as fin:
reader = csv.reader(fin, delimiter='\t')
for row in reader:
ingredient = row[INGREDIENT]
ingredient = unicode(ingredient, 'utf-8')
strs = r.findall(ingredient) # remove symbol
ingredient = ' '.join(strs)
ingredient = ingredient.encode('utf-8')
ingredients.append(ingredient)
return ingredients
def generate_label(flow_df, ingredients, synonym):
"""
    input: pandas dataframe of the flow graph
           a list of ingredients
           a synonym dictionary (word -> ontology concept)
    output: a dict mapping each step to its labeled nodes
"""
nodes = {} # key:index value:food or verb
linked_idxs = {} # key:index value:a list of linked indices & hop
idxs_per_step = {} # key:step value:indices in the step
ACTION_TYPES = ['Ac', 'Sf']
def _convert_by_synonym(word, partial_match=False):
if partial_match:
            # return the value of the synonym key with the longest regex match
match_word = None
for synonym_word in synonym.keys():
match_object = re.search(synonym_word, word)
if match_object:
m_word = match_object.group()
if match_word == None or len(m_word) > len(match_word):
match_word = m_word
if match_word != None:
return synonym[match_word]
else:
return None
elif not partial_match:
if word in synonym:
return synonym[word]
else:
return None
def _make_node(rNE, NEtype, step, sentence, word_no, weight, ontology_pmatch = False):
rNE = unicode(rNE, 'utf-8')
node = {
'rNE':rNE,
'NEtype':NEtype,
'ontology':_convert_by_synonym(rNE, partial_match=ontology_pmatch),
'step':step,
'sentence':sentence,
'word_no':word_no,
'weight':weight
}
return node
def _related_food(idx):
row = flow_df.ix[idx]
# register step
if row['step'] not in idxs_per_step:
idxs_per_step[row['step']] = []
idxs_per_step[row['step']].append(idx)
if idx not in linked_idxs:
linked_idxs[idx] = []
# register previous linked indices
pre_idxs = row['enter_edges'].split()
for pre_idx in pre_idxs:
pre_idx, _ = pre_idx.split(':')
if pre_idx not in linked_idxs:
_related_food(pre_idx) # recursion
linked_idxs[idx] += [[link[0], link[1] + 1] for link in linked_idxs[pre_idx]]
        # case: the node is a food (ingredient)
if row['NEtype'] == 'F':
current_node_list = []
if row['rNE'] == '材料':
for i, food in enumerate(ingredients):
node = _make_node(food, 'F',row['step'],row['sentence'],row['word_no'],1.0,True)
                    linked_idxs[idx].append(['%s_%d'%(idx,i), 0]) # hop count
nodes['%s_%d'%(idx,i)] = node
current_node_list.append(node)
else:
node = _make_node(row['rNE'], 'F',row['step'],row['sentence'],row['word_no'],1.0)
                linked_idxs[idx].append([idx, 0]) # hop count
nodes[idx] = node
current_node_list.append(node)
            # attach the related verbs to each food node
for link_idx, hop in linked_idxs[idx]:
link_node = nodes[link_idx]
if link_node['NEtype'] in ACTION_TYPES:
for node in current_node_list:
if link_node['NEtype'] not in node:
node[link_node['NEtype']] = []
node[link_node['NEtype']].append({"id":link_idx, "hop":-hop, "weight":1.0})
        # case: the node is a verb (action)
elif row['NEtype'] in ACTION_TYPES:
node = _make_node(row['rNE'],row['NEtype'],row['step'],row['sentence'],row['word_no'],1.0)
nodes[idx] = node
            linked_idxs[idx].append([idx, 0]) # hop count
            # attach this verb to the related food nodes
for link in linked_idxs[idx]:
link_idx, hop = link
if nodes[link_idx]['NEtype'] == 'F':
food_node = nodes[link_idx]
if row['NEtype'] not in food_node:
food_node[row['NEtype']] = []
food_node[row['NEtype']].append({"id":idx, "hop":hop, "weight":1.0}) # index and hop
    # for every node in the flow graph, trace back to find the foods it involves
for idx in flow_df.index:
if idx not in linked_idxs:
_related_food(idx)
links = set()
label = {}
for step, idxs in sorted(idxs_per_step.items(), key = lambda x: x[0]):
label[step] = {}
links.update([link[0] for idx in idxs_per_step[step] for link in linked_idxs[idx]])
for link in links:
node = copy.deepcopy(nodes[link])
if node['NEtype'] == 'F':
for action_type in ACTION_TYPES:
if action_type in node:
                        # drop verbs that only appear in later steps
node[action_type] = [l for l in node[action_type] if l["id"] in links]
label[step][link] = node
return label
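# Illustrative usage sketch (not part of the original script): how the three
# helpers above might be wired together. The file paths and the synonym
# mapping below are hypothetical placeholders, not real project files.
if __name__ == '__main__':
    flow_df = get_dataframe_of_flowgraph('recipe_001.flow') # hypothetical path
    ingredients = get_ingredients('recipe_001.ingredients') # hypothetical path
    synonym = {'carrot': 'vegetable'} # assumed word -> ontology format
    label = generate_label(flow_df, ingredients, synonym)
    print(json.dumps(label, ensure_ascii=False, indent=2))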
| bsd-2-clause |
ssaeger/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
bnsgeyer/Copter3_4 | Tools/mavproxy_modules/lib/magcal_graph_ui.py | 108 | 8248 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from pymavlink.mavutil import mavlink
from MAVProxy.modules.lib import wx_processguard
from MAVProxy.modules.lib.wx_loader import wx
import geodesic_grid as grid
class MagcalPanel(wx.Panel):
_status_markup_strings = {
mavlink.MAG_CAL_NOT_STARTED: 'Not started',
mavlink.MAG_CAL_WAITING_TO_START: 'Waiting to start',
mavlink.MAG_CAL_RUNNING_STEP_ONE: 'Step one',
mavlink.MAG_CAL_RUNNING_STEP_TWO: 'Step two',
mavlink.MAG_CAL_SUCCESS: '<span color="blue">Success</span>',
mavlink.MAG_CAL_FAILED: '<span color="red">Failed</span>',
}
_empty_color = '#7ea6ce'
_filled_color = '#4680b9'
def __init__(self, *k, **kw):
super(MagcalPanel, self).__init__(*k, **kw)
facecolor = self.GetBackgroundColour().GetAsString(wx.C2S_HTML_SYNTAX)
fig = plt.figure(facecolor=facecolor, figsize=(1,1))
self._canvas = FigureCanvas(self, wx.ID_ANY, fig)
self._canvas.SetMinSize((300,300))
self._id_text = wx.StaticText(self, wx.ID_ANY)
self._status_text = wx.StaticText(self, wx.ID_ANY)
self._completion_pct_text = wx.StaticText(self, wx.ID_ANY)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self._id_text)
sizer.Add(self._status_text)
sizer.Add(self._completion_pct_text)
sizer.Add(self._canvas, proportion=1, flag=wx.EXPAND)
self.SetSizer(sizer)
ax = fig.add_subplot(111, axis_bgcolor=facecolor, projection='3d')
self.configure_plot(ax)
def configure_plot(self, ax):
extra = .5
lim = grid.radius + extra
ax.set_xlim3d(-lim, lim)
ax.set_ylim3d(-lim, lim)
ax.set_zlim3d(-lim, lim)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
self._polygons_collection = Poly3DCollection(
grid.sections_triangles,
edgecolors='#386694',
)
ax.add_collection3d(self._polygons_collection)
def update_status_from_mavlink(self, m):
status_string = self._status_markup_strings.get(m.cal_status, '???')
self._status_text.SetLabelMarkup(
'<b>Status:</b> %s' % status_string,
)
def mavlink_magcal_report(self, m):
self.update_status_from_mavlink(m)
self._completion_pct_text.SetLabel('')
def mavlink_magcal_progress(self, m):
facecolors = []
for i, mask in enumerate(m.completion_mask):
for j in range(8):
section = i * 8 + j
if mask & 1 << j:
facecolor = self._filled_color
else:
facecolor = self._empty_color
facecolors.append(facecolor)
self._polygons_collection.set_facecolors(facecolors)
self._canvas.draw()
self._id_text.SetLabelMarkup(
'<b>Compass id:</b> %d' % m.compass_id
)
self._completion_pct_text.SetLabelMarkup(
'<b>Completion:</b> %d%%' % m.completion_pct
)
self.update_status_from_mavlink(m)
_legend_panel = None
@staticmethod
def legend_panel(*k, **kw):
if MagcalPanel._legend_panel:
return MagcalPanel._legend_panel
p = MagcalPanel._legend_panel = wx.Panel(*k, **kw)
sizer = wx.BoxSizer(wx.HORIZONTAL)
p.SetSizer(sizer)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._empty_color)
sizer.Add(marker, flag=wx.ALIGN_CENTER)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections not hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
marker.SetBackgroundColour(MagcalPanel._filled_color)
sizer.Add(marker, border=10, flag=wx.ALIGN_CENTER | wx.LEFT)
text = wx.StaticText(p, wx.ID_ANY)
text.SetLabel('Sections hit')
sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
return p
class MagcalFrame(wx.Frame):
def __init__(self, conn):
super(MagcalFrame, self).__init__(
None,
wx.ID_ANY,
title='Magcal Graph',
)
self.SetMinSize((300, 300))
self._conn = conn
self._main_panel = wx.ScrolledWindow(self, wx.ID_ANY)
self._main_panel.SetScrollbars(1, 1, 1, 1)
self._magcal_panels = {}
self._sizer = wx.BoxSizer(wx.VERTICAL)
self._main_panel.SetSizer(self._sizer)
idle_text = wx.StaticText(self._main_panel, wx.ID_ANY)
idle_text.SetLabelMarkup('<i>No calibration messages received yet...</i>')
idle_text.SetForegroundColour('#444444')
self._sizer.AddStretchSpacer()
self._sizer.Add(
idle_text,
proportion=0,
flag=wx.ALIGN_CENTER | wx.ALL,
border=10,
)
self._sizer.AddStretchSpacer()
self._timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.timer_callback, self._timer)
self._timer.Start(200)
def add_compass(self, id):
if not self._magcal_panels:
self._sizer.Clear(deleteWindows=True)
self._magcal_panels_sizer = wx.BoxSizer(wx.HORIZONTAL)
self._sizer.Add(
self._magcal_panels_sizer,
proportion=1,
flag=wx.EXPAND,
)
legend = MagcalPanel.legend_panel(self._main_panel, wx.ID_ANY)
self._sizer.Add(
legend,
proportion=0,
flag=wx.ALIGN_CENTER,
)
self._magcal_panels[id] = MagcalPanel(self._main_panel, wx.ID_ANY)
self._magcal_panels_sizer.Add(
self._magcal_panels[id],
proportion=1,
border=10,
flag=wx.EXPAND | wx.ALL,
)
def timer_callback(self, evt):
close_requested = False
mavlink_msgs = {}
while self._conn.poll():
m = self._conn.recv()
if isinstance(m, str) and m == 'close':
close_requested = True
continue
if m.compass_id not in mavlink_msgs:
# Keep the last two messages so that we get the last progress
# if the last message is the calibration report.
mavlink_msgs[m.compass_id] = [None, m]
else:
l = mavlink_msgs[m.compass_id]
l[0] = l[1]
l[1] = m
if close_requested:
self._timer.Stop()
self.Destroy()
return
if not mavlink_msgs:
return
needs_fit = False
for k in mavlink_msgs:
if k not in self._magcal_panels:
self.add_compass(k)
needs_fit = True
if needs_fit:
self._sizer.Fit(self)
for k, l in mavlink_msgs.items():
for m in l:
if not m:
continue
panel = self._magcal_panels[k]
if m.get_type() == 'MAG_CAL_PROGRESS':
panel.mavlink_magcal_progress(m)
elif m.get_type() == 'MAG_CAL_REPORT':
panel.mavlink_magcal_report(m)
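# Illustrative sketch (not part of the original module): decoding a
# MAG_CAL_PROGRESS completion_mask into the list of geodesic-grid sections
# that have been hit, mirroring the bit loop in mavlink_magcal_progress().
# The example mask value below is made up.
def _sections_hit(completion_mask):
    hit = []
    for i, mask in enumerate(completion_mask):
        for j in range(8):
            if mask & 1 << j:
                hit.append(i * 8 + j)
    return hit

# e.g. _sections_hit([0b00000011, 0b10000000]) -> [0, 1, 15]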
| gpl-3.0 |
jbkopecky/housebot | models/neural_net.py | 1 | 3417 | from pipelines import ItemSelector
from pipelines import MyOneHotEncoder
from pipelines import FindReplace
from pipelines import ReplaceNaN
from utils import plot_results
from utils import make_xy_data
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import train_test_split
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.decomposition import TruncatedSVD
from sklearn.neural_network import MLPRegressor as nnet
import matplotlib.pyplot as plt
fr_arrondissement = [
("Le ", ""),
('-', "_"),
(' ', "_"),
('Saint', 'st'),
('_le_Pont', ''),
('_Perret', ''),
]
model = Pipeline([
('Union', FeatureUnion([
('Surface', Pipeline([
('Selection', ItemSelector(['surface_m2'])),
('Normalise', MinMaxScaler()),
]),
),
('Arrondissement', Pipeline([
('Selection', ItemSelector(['arrondissement'])),
('Clean', FindReplace(fr_arrondissement)),
('MyOneHotEncoder', MyOneHotEncoder()),
('Normalise', MinMaxScaler()),
]),
),
('orientation', Pipeline([
('Selection', ItemSelector(['orientation'])),
('Encoder', MyOneHotEncoder(do_parse=True)),
('Replace_NaN', ReplaceNaN(0.)),
('Normalise', MinMaxScaler()),
]),
),
('Max_Etages', Pipeline([
('Selection', ItemSelector(['etage', 'etage 2'])),
('Imputer', Imputer(strategy="most_frequent")),
('Normalise', MinMaxScaler()),
]),
),
('NoNaNFeats', Pipeline([
('Selection', ItemSelector(['piece'])),
('Normalise', MinMaxScaler()),
]),
),
('Description', Pipeline([
('Selection', ItemSelector('description')),
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('best', TruncatedSVD(n_components=1000)),
]),
),
('General', Pipeline([
('Selection', ItemSelector(['balcon', 'visavis', 'piscine',
'box', 'meuble', 'refaitaneuf'])),
('Replace_NaN', ReplaceNaN(0.)),
('Normalise', MinMaxScaler()),
]),
),
], n_jobs=-1),
),
    ('nnet', nnet(activation='logistic', alpha=0.001, random_state=32, verbose=2)),
])
X, y = make_xy_data('./data/merged_data.csv', ['surface_m2', 'piece'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=32)
model.fit(X_train, y_train)
y_test_pred = model.predict(X_test)
y_train_pred = model.predict(X_train)
train_error = mean_squared_error(y_train_pred, y_train)
test_error = mean_squared_error(y_test_pred, y_test)
train_title = "Train error: ", train_error
test_title = "Test error: ", test_error
plot_results({train_title: [y_train_pred, y_train],
test_title: [y_test_pred, y_test]})
plt.savefig("./plots/neural_net.png")
plt.show()
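# Illustrative sketch (not part of the original script): a minimal
# ItemSelector-style transformer, assuming the local ``pipelines`` module
# implements something along these lines; the real implementation may differ.
from sklearn.base import BaseEstimator, TransformerMixin


class SimpleItemSelector(BaseEstimator, TransformerMixin):
    """Select one or more columns from a pandas DataFrame."""

    def __init__(self, key):
        self.key = key

    def fit(self, X, y=None):
        return self

    def transform(self, X):
        return X[self.key]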
| mit |
ejolly/pymer4 | examples/example_04_simulating_data.py | 2 | 5447 | """
4. Simulating Data
==================
:code:`pymer4` comes with some easy-to-use functions for simulating data that can be modeled with :code:`Lm` and multi-level data that can be modeled with :code:`Lmer` or :code:`Lm2`. These functions can be found in the :code:`pymer4.simulate` module and are aptly named: :code:`simulate_lm()` and :code:`simulate_lmm()` respectively.
:code:`pymer4` gives you a lot of control over what you want your data to look like by setting properties such as:
- Number of data points and number of coefficients
- Specific coefficient values
- Means and standard deviations of predictors
- Correlations between predictors
- Amount of error (noise) in the data
- Number of groups/clusters (multi-level data only)
- Variance of random effects (multi-level data only)
"""
###############################################################################
# Generating standard regression data
# -----------------------------------
# Generating data for a standard regression returns a pandas dataframe with outcome and predictor variables ready for use with :code:`Lm()`, along with an array of coefficients used to produce the data.
#
# Let's generate 500 observations, with coefficient values of: 1.2, -40.1, and 3. We also have an intercept with a value of 100. The means of the columns of our design matrix (i.e. means of the predictors) will be: 10, 30, and 1. We'll also add noise from a normal distribution with mean = 0, and sd = 5. Any correlations between predictors are purely random.
# Import the simulation function
from pymer4.simulate import simulate_lm
# Also fix the random number generator for reproducibility
import numpy as np
np.random.seed(10)
data, b = simulate_lm(
500, 3, coef_vals=[100, 1.2, -40.1, 3], mus=[10, 30, 1], noise_params=(0, 5)
)
print(f"True coefficients:\n{b}\n")
print(f"Data:\n{data.head()}")
###############################################################################
# Here are some checks you might do to make sure the data were correctly generated:
###############################################################################
# Check the means of predictors
print(data.iloc[:, 1:].mean(axis=0))
###############################################################################
# Check correlations between predictors
print(data.iloc[:, 1:].corr())
###############################################################################
# Check coefficient recovery
from pymer4.models import Lm
model = Lm("DV ~ IV1+IV2+IV3", data=data)
model.fit(summarize=False)
print(model.coefs.loc[:, "Estimate"])
###############################################################################
# You have the option of being as general or specific as you like when generating data. Here's a simpler example that generates 100 observations with 5 predictors from a standard normal distribution, i.e. mean = 0, sd = 1 with random correlations between predictors. :code:`pymer4` will randomly decide what to set the coefficient values to.
data, b = simulate_lm(100, 5)
print(f"True coefficients:\n{b}\n")
print(f"Data:\n{data.head()}")
###############################################################################
# Generating multi-level regression data
# --------------------------------------
# Generating data for a multi-level regression is just as simple and returns a pandas dataframe with outcome and predictor variables ready for use with :code:`Lmer()`, another dataframe with group/cluster level coefficients (i.e. BLUPs), and a vector of population-level coefficients.
#
# Here's an example generating 5000 observations, organized as 100 groups with 50 observations each. We'll have three predictors with the coefficients: 1.8, -2, and 10. We also have an intercept with a coefficient of 4. The means of the columns of our design matrix (i.e. means of the predictors) will be: 10, 30, and 2. We'll also introduce correlations between our predictors with a mean r of .15. We'll leave the default of standard normal noise, i.e. mean = 0 and sd = 1.
from pymer4.simulate import simulate_lmm
num_obs = 50
num_coef = 3
num_grps = 100
mus = [10.0, 30.0, 2.0]
coef_vals = [4.0, 1.8, -2, 10]
corrs = 0.15
data, blups, b = simulate_lmm(
num_obs, num_coef, num_grps, coef_vals=coef_vals, mus=mus, corrs=corrs
)
print(f"True coefficients:\n{b}\n")
print(f"BLUPs:\n{blups.head()}\n")
print(f"Data:\n{data.head()}\n")
###############################################################################
# Again here are some checks you might do to make sure the data were correctly generated (by default lmm data will generally be a bit noisier due to within and across group/cluster variance; see the API for how to customize this):
# Group the data before running checks
group_data = data.groupby("Group")
###############################################################################
# Check mean of predictors within each group
print(group_data.apply(lambda grp: grp.iloc[:, 1:-1].mean(axis=0)))
###############################################################################
# Check correlations between predictors within each group
print(group_data.apply(lambda grp: grp.iloc[:, 1:-1].corr()))
###############################################################################
# Check coefficient recovery
from pymer4.models import Lmer
model = Lmer("DV ~ IV1+IV2+IV3 + (1|Group)", data=data)
model.fit(summarize=False)
print(model.coefs.loc[:, "Estimate"])
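###############################################################################
# One extra sanity check (a sketch, not part of the original example): the
# group-level BLUPs returned by ``simulate_lmm`` should scatter around the
# population-level coefficients, which gives a rough feel for the random-effect
# variance mentioned above.
print(blups.mean(axis=0))  # should sit close to the true coefficients in ``b``
print(blups.std(axis=0))   # rough scale of the group-level (random) deviations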
| mit |
knutfrode/opendrift | examples/example_entrainment_rate_oil_types.py | 1 | 3035 | #!/usr/bin/env python
from datetime import datetime, timedelta
from opendrift.models.openoil3D import OpenOil3D
import matplotlib.pyplot as plt
import numpy as np
######################################################
# Li et al. (2017) entrainment rate (light vs. heavy oil)
######################################################
o2 = OpenOil3D(loglevel=0, weathering_model='noaa')
o2.fallback_values['land_binary_mask'] = 0
o2.fallback_values['x_sea_water_velocity'] = -.2
o2.fallback_values['y_sea_water_velocity'] = 0
o2.fallback_values['x_wind'] = 10
o2.fallback_values['y_wind'] = 0
o2.fallback_values['sea_surface_wave_stokes_drift_x_velocity'] = .3
o2.fallback_values['sea_surface_wave_stokes_drift_y_velocity'] = 0
o2.set_config('wave_entrainment:entrainment_rate', 'Li et al. (2017)')
o2.set_config('wave_entrainment:droplet_size_distribution', 'Johansen et al. (2015)')
o2.set_config('processes:evaporation', False)
o2.set_config('processes:dispersion', False)
o2.set_config('turbulentmixing:droplet_diameter_min_wavebreaking', 1e-6)
o2.set_config('turbulentmixing:droplet_diameter_max_wavebreaking', 1e-3)
o2.seed_elements(lon=4, lat=60, time=datetime.now(), number=1000,
radius=100, z=0, oiltype='TIA JUANA HEAVY, OIL & GAS')
o2.run(duration=timedelta(hours=12), time_step=900)
o3 = OpenOil3D(loglevel=0, weathering_model='noaa')
o3.fallback_values['land_binary_mask'] = 0
o3.fallback_values['x_sea_water_velocity'] = -.2
o3.fallback_values['y_sea_water_velocity'] = 0
o3.fallback_values['x_wind'] = 10
o3.fallback_values['y_wind'] = 0
o3.fallback_values['sea_surface_wave_stokes_drift_x_velocity'] = .3
o3.fallback_values['sea_surface_wave_stokes_drift_y_velocity'] = 0
o3.set_config('wave_entrainment:entrainment_rate', 'Li et al. (2017)')
o3.set_config('wave_entrainment:droplet_size_distribution', 'Johansen et al. (2015)')
o3.set_config('processes:evaporation', False)
o3.set_config('processes:dispersion', False)
o3.set_config('turbulentmixing:droplet_diameter_min_wavebreaking', 1e-6)
o3.set_config('turbulentmixing:droplet_diameter_max_wavebreaking', 1e-3)
o3.seed_elements(lon=4, lat=60, time=datetime.now(), number=1000,
radius=100, z=0, oiltype='TIA JUANA LIGHT, OIL & GAS') #'EKOFISK BLEND, STATOIL' similar ent.
o3.run(duration=timedelta(hours=12), time_step=900)
###########################
# Plotting and comparing
###########################
print('#######################')
print('Entrainment rate (heavy)', np.mean(o2.oil_wave_entrainment_rate()))
print('Entrainment rate (light)', np.mean(o3.oil_wave_entrainment_rate()))
print('Viscosity (heavy)', np.mean(o2.elements.viscosity))
print('Viscosity (light)', np.mean(o3.elements.viscosity))
print('Density (heavy)', np.mean(o2.elements.density))
print('Density (light)', np.mean(o3.elements.density))
print('#######################')
o2.plot_oil_budget()
o3.plot_oil_budget()
legend = ['TIA JUANA HEAVY', 'TIA JUANA LIGHT']
o2.animation_profile(compare=o3, legend=legend)
o2.animation(compare=o3, legend=legend)
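# Extra sketch (not part of the original example): condense the comparison
# printed above into a single number, using the same rate arrays as before.
rate_heavy = np.mean(o2.oil_wave_entrainment_rate())
rate_light = np.mean(o3.oil_wave_entrainment_rate())
print('Light/heavy mean entrainment rate ratio: %.1f' % (rate_light / rate_heavy))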
| gpl-2.0 |
larsoner/mne-python | mne/tests/test_import_nesting.py | 6 | 1371 | # Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import sys
from mne.utils import run_subprocess
run_script = """
import sys
import mne
out = set()
# check scipy
ok_scipy_submodules = set(['scipy', 'numpy', # these appear in old scipy
'fftpack', 'lib', 'linalg', 'fft',
'misc', 'sparse', 'version'])
scipy_submodules = set(x.split('.')[1] for x in sys.modules.keys()
if x.startswith('scipy.') and '__' not in x and
not x.split('.')[1].startswith('_')
and sys.modules[x] is not None)
bad = scipy_submodules - ok_scipy_submodules
if len(bad) > 0:
out |= {'scipy submodules: %s' % list(bad)}
# check sklearn and others
for x in sys.modules.keys():
for key in ('sklearn', 'pandas', 'mayavi', 'pyvista', 'matplotlib',
'dipy', 'nibabel', 'cupy', 'picard', 'pyvistaqt'):
if x.startswith(key):
out |= {key}
if len(out) > 0:
print('\\nFound un-nested import(s) for %s' % (sorted(out),), end='')
exit(len(out))
"""
def test_module_nesting():
"""Test that module imports are properly nested."""
stdout, stderr, code = run_subprocess([sys.executable, '-c', run_script],
return_code=True)
assert code == 0, stdout + stderr
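# Illustrative sketch (not part of the original test file): the lazy-import
# pattern that the check above enforces -- heavy optional dependencies are
# imported inside the function that needs them, not at module import time.
def _nested_import_sketch(data):
    import matplotlib.pyplot as plt  # nested import, only paid when called
    fig, ax = plt.subplots()
    ax.plot(data)
    return fig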
| bsd-3-clause |
jorik041/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
    # regression test for missing sorting of sample weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if y_min and y_max values are used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
victorbergelin/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution, use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
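# Illustrative sketch (not part of the library's test suite) of the helper
# above: with one column that has at least ``grid_resolution`` unique values,
# the returned axis is ``grid_resolution`` equally spaced points between the
# requested percentiles of that column, and ``grid`` is the cartesian product
# of the per-column axes.
#
#     >>> X_demo = np.arange(100.).reshape(-1, 1)
#     >>> grid, axes = _grid_from_X(X_demo, percentiles=(0.05, 0.95),
#     ...                           grid_resolution=5)
#     >>> grid.shape, len(axes)
#     ((5, 1), 1)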
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the ``grid``. Only if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
ManyBodyPhysics/LectureNotesPhysics | Programs/Chapter10-programs/python/imsrg_pairing/plot_imsrg_flow.py | 2 | 4799 | #!/usr/bin/env python
#------------------------------------------------------------------------------
# plot_imsrg_flow.py
#
# author: H. Hergert
# version: 1.0.1
# date: Jul 6, 2020
#
# tested with Python v2.7 and v3.7
#
#------------------------------------------------------------------------------
from sys import argv
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.colors import SymLogNorm, Normalize
from mpl_toolkits.axes_grid1 import AxesGrid, make_axes_locatable
import numpy as np
from numpy import array, dot, diag, reshape
#------------------------------------------------------------------------------
# plot helpers
#------------------------------------------------------------------------------
# format tick labels using LaTeX-like math fonts
def myLabels(x, pos):
return '$%s$'%x
def myLogLabels(x, pos):
return '$10^{%d}$'%(np.log10(x))
# save these settings for use in both following plots
def myPlotSettings(ax):
ax.minorticks_on()
ax.tick_params(axis='both',which='major',width=1.5,length=8)
ax.tick_params(axis='both',which='minor',width=1.5,length=5)
ax.tick_params(axis='both',width=2,length=10,labelsize=20)
for s in ['left', 'right', 'top', 'bottom']:
ax.spines[s].set_linewidth(2)
return
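# Usage sketch for the formatters above (the numbers are illustrative):
# myLogLabels(100.0, None) returns '$10^{2}$', and a formatter is attached to
# an axis with ax.xaxis.set_major_formatter(FuncFormatter(myLogLabels)),
# exactly as done in the plotting functions below.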
#------------------------------------------------------------------------------
# plot flow
#------------------------------------------------------------------------------
def plot_energies(data, exact, filename):
# diagonals vs. eigenvalues on absolute scale
fig, ax = plt.subplots()
plt.semilogx([1.0e-8,1.0e-4,1.0,100], [exact,exact,exact,exact], linewidth=2,
color='black', linestyle='dashed', dashes=(10,5))
plt.semilogx(data[:,0], data[:,1], color='blue', marker='o', markersize=9, label='$E$')
plt.semilogx(data[:,0], data[:,1]+data[:,2], color='red', marker='s', markersize=9, label='$+\Delta E^{(2)}$')
plt.semilogx(data[:,0], data[:,1]+data[:,2]+data[:,3], color='green', marker='D', markersize=9,label='$+\Delta E^{(3)}$')
myPlotSettings(ax)
ax.xaxis.set_major_formatter(FuncFormatter(myLogLabels))
ax.yaxis.set_major_formatter(FuncFormatter(myLabels))
ax.set_xlim([0.00006,13])
ymin,ymax=ax.get_ylim()
ax.set_ylim(ymin-0.005,ymax+0.005)
plt.xlabel('$s$', fontsize=20)
plt.ylabel('$E\,\mathrm{[a.u.]}$', fontsize=20)
# plt.legend(bbox_to_anchor=(0.35, 0.05), loc=3, borderaxespad=0.5)
plt.legend(loc=1, borderaxespad=0.5)
plt.savefig("%s.pdf"%(filename), bbox_inches="tight", pad_inches=0.05)
plt.show()
plt.close()
return
def plot_norms_loglog(data, filename):
# diagonals vs. eigenvalues on absolute scale
fig, ax = plt.subplots()
plt.loglog(data[:,0], data[:,6], basex=10, color='blue', marker='o', markersize=9, label='$||\eta||$')
plt.loglog(data[:,0], data[:,8], basex=10, color='red', marker='s', markersize=9, label='$||\Gamma_{od}||$')
myPlotSettings(ax)
ax.xaxis.set_major_formatter(FuncFormatter(myLogLabels))
ax.yaxis.set_major_formatter(FuncFormatter(myLogLabels))
plt.xlabel('$s$', fontsize=20)
plt.ylabel('$||\eta||, ||\Gamma_{od}||\, [\mathrm{a.u.}]$', fontsize=20)
plt.legend(bbox_to_anchor=(0.05, 0.05), loc=3, borderaxespad=0.5)
plt.savefig("%s.norms.pdf"%(filename.rsplit(".",1)[0]), bbox_inches="tight", pad_inches=0.05)
plt.show()
plt.close()
return
def plot_norms_semilog(data, filename):
# diagonals vs. eigenvalues on absolute scale
fig, ax = plt.subplots()
plt.semilogy(data[:,0], data[:,6], basey=10, color='blue', marker='o', markersize=9, label='$||\eta||$')
plt.semilogy(data[:,0], data[:,8], basey=10, color='red', marker='s', markersize=9, label='$||\Gamma_{od}||$')
myPlotSettings(ax)
ax.xaxis.set_major_formatter(FuncFormatter(myLabels))
ax.yaxis.set_major_formatter(FuncFormatter(myLogLabels))
plt.xlabel('$s$', fontsize=20)
plt.ylabel('$||\eta||, ||\Gamma_{od}||\, [\mathrm{a.u.}]$', fontsize=20)
plt.legend(bbox_to_anchor=(0.05, 0.05), loc=3, borderaxespad=0.5)
plt.savefig("%s.norms.semilog.pdf"%(filename.rsplit(".",1)[0]), bbox_inches="tight", pad_inches=0.05)
plt.show()
plt.close()
return
#------------------------------------------------------------------------------
# main
#------------------------------------------------------------------------------
def main():
filename = argv[1]
exact = float(argv[2])
# read data from file
data = np.loadtxt(filename, skiprows=2)
plot_energies(data, exact, filename)
plot_norms_loglog(data,filename)
plot_norms_semilog(data,filename)
return
#------------------------------------------------------------------------------
# make executable
#------------------------------------------------------------------------------
if __name__ == "__main__":
main()
| cc0-1.0 |
gclenaghan/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
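# Hedged follow-up to the purity remark in the module docstring: keep only
# samples whose decision score exceeds a user-chosen positive threshold to
# obtain a purer class-B subset (the threshold value here is arbitrary).
purity_threshold = 0.5
high_purity_B = X[twoclass_output > purity_threshold]
print("Samples selected for a purer class-B subset: %d" % len(high_purity_B))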
| bsd-3-clause |
tsbischof/photon_correlation | python/photon_correlation/FLID.py | 1 | 2022 | #!/usr/bin/env python
import sys
import os
import csv
import bz2
import matplotlib.pyplot as plt
import numpy
def force_aspect(ax,aspect=1):
im = ax.get_images()
extent = im[0].get_extent()
ax.set_aspect(abs((extent[1]-extent[0])/(extent[3]-extent[2]))/aspect)
class FLID(object):
def __init__(self, filename=None):
self.intensity = list()
self.arrival_time = list()
self.events = list()
self.counts = list()
if filename is not None:
self.from_filename(filename)
def from_filename(self, filename):
if not os.path.exists(filename):
bz2_name = "{}.bz2".format(filename)
if os.path.exists(bz2_name):
filename = bz2_name
if filename.endswith("bz2"):
open_f = lambda x: bz2.open(x, "rt")
else:
open_f = open
with open_f(filename) as stream_in:
return(self.from_stream(stream_in))
def from_stream(self, stream_in):
reader = csv.reader(stream_in)
time_bins = (next(reader), next(reader))
for left, right in zip(time_bins[0][3:], time_bins[1][3:]):
self.arrival_time.append((float(left), float(right)))
for line in reader:
self.intensity.append((float(line[0]), float(line[1])))
self.events.append(int(line[2]))
self.counts.append(list(map(int, line[3:])))
return(self)
def make_figure(self):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.imshow(self.counts,
interpolation="none",
origin="lower",
extent=[self.arrival_time[0][0],
self.arrival_time[-1][1],
self.intensity[0][0],
self.intensity[-1][1]])
force_aspect(ax)
ax.set_xlabel("Time/ps")
ax.set_ylabel("Counts per bin")
fig.tight_layout()
return(fig)
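# Minimal usage sketch (the file name below is an assumption, no sample data
# ships with this module):
#
#     flid = FLID("example.flid")
#     fig = flid.make_figure()
#     fig.savefig("example_flid.png")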
| bsd-3-clause |
gammalib/gammalib | inst/cta/test/dev/test_likelihood_profile.py | 1 | 9558 | #! /usr/bin/env python
# ==========================================================================
# This script performs a likelihood profile computation
#
# Copyright (C) 2014 Juergen Knoedlseder
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ==========================================================================
import gammalib
import time
try:
import matplotlib.pyplot as plt
has_matplotlib = True
except ImportError:
print("Matplotlib is not (correctly) installed on your system.")
has_matplotlib = False
# ======================= #
# Show likelihood profile #
# ======================= #
def show_likelihood_profile(result):
"""
"""
# Continue only if matplotlib is available
if has_matplotlib:
# Create log-likelohood profile
plt.figure(1)
plt.title("Log-likelihood profile for parameter \""+result["parname"]+"\"")
# Plot logL profile
plt.plot(result["value"], result["logL"], 'ro-')
# Build gradient array
gradient = []
for value in result["value"]:
grad = result["opt"]["gradient"] * (value - result["opt"]["value"]) + \
result["opt"]["logL"]
gradient.append(grad)
# Plot gradient
plt.plot(result["value"], gradient, 'b-')
# Set axes
plt.xlabel(result["parname"])
plt.ylabel("log-likelihood")
# Create Npred profile
plt.figure(2)
plt.title("Npred profile for parameter \""+result["parname"]+"\"")
# Plot Npred profile
plt.plot(result["value"], result["npred"], 'ro-')
# Set axes
plt.xlabel(result["parname"])
plt.ylabel("Npred")
# Notify
print("PLEASE CLOSE WINDOW TO CONTINUE ...")
# Show plot
plt.show()
# Return
return
# ========================== #
# Compute likelihood profile #
# ========================== #
def compute_likelihood_profile(obs, parname="Radius", scale=1.0e-5, steps=10):
"""
Compute the log-likelihood profile for a given parameter.
Keywords:
parname - Parameter name
scale - Variation size of parameter for profile computation
steps - Number of steps to the left and to the right of the optimum
"""
# Free parameter for which profile is to be determined
obs.models()[0][parname].free()
# Allocate Levenberg-Marquardt optimizer
log = gammalib.GLog()
log.cout(True)
opt = gammalib.GOptimizerLM(log)
opt.eps(5.0e-3)
# Optimize model parameters and compute errors
obs.optimize(opt)
obs.errors(opt)
# Log optimizer into console
print(opt)
print(obs)
print(obs.models())
# Get optimizer parameter and gradient
opt_logL = obs.logL()
opt_npred = obs.npred()
opt_value = obs.models()[0][parname].value()
opt_gradient = obs.models()[0][parname].gradient()
# Log results into console
print("%.10f %.10f %.10f %.10f" % (opt_value, opt_logL, opt_npred, opt_gradient))
# Allocate arrays
values = []
logLs = []
npreds = []
# Loop over values
for i in range(-steps, steps+1):
# Set new model parameter
obs.models()[0][parname].remove_range()
obs.models()[0][parname].value(opt_value+i*scale)
# Evaluate log-likelihood and retrieve results
obs.eval()
value = obs.models()[0][parname].value()
logL = obs.logL()
npred = obs.npred()
values.append(value)
logLs.append(logL)
npreds.append(npred)
# Log results into console
print("%.10f %.10f %.10f" % (value, obs.logL(), obs.npred()))
# Build result dictionary
result = {'parname': parname,
'opt': {'logL': opt_logL, 'npred': opt_npred, \
'value': opt_value, 'gradient': opt_gradient}, \
'value': values, 'logL': logLs, 'npred': npreds}
# Return result
return result
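# Sizing note for the sweep above (simple arithmetic, nothing new): with the
# defaults scale=1.0e-5 and steps=10, the loop evaluates the parameter at 21
# points spanning opt_value - 1.0e-4 to opt_value + 1.0e-4.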
# ===================================================== #
# CTA unbinned or binned likelihood profile computation #
# ===================================================== #
def original_likelihood_profile(model, cntmap, irf, caldb, cntref):
"""
Perform unbinned or binned likelihood profile computation.
"""
# Dump header
print("Unbinned or Binned likelihood profile computation:")
print("==================================================")
# Allocate empty observation container
obs = gammalib.GObservations()
# Allocate empty CTA observation
cta = gammalib.GCTAObservation()
# Load counts map into CTA observation
cta.load(cntmap)
# Specify response for CTA observation
cta.response(irf, caldb)
# Append CTA observation to observation container
obs.append(cta)
# Load model
models = gammalib.GModels(model)
obs.models(models)
# Get start CPU time
tstart = time.clock()
# Compute likelihood profile
result = compute_likelihood_profile(obs)
# Get stop CPU time
tstop = time.clock()
telapsed = tstop - tstart
print(" Elapsed time ..............: %.3f sec" % telapsed)
# Show likelihood profile
show_likelihood_profile(result)
# Return
return
# ========================================== #
# CTA stacked likelihood profile computation #
# ========================================== #
def stacked_likelihood_profile(model, cntmap, expcube, psfcube, cntref):
"""
Perform stacked likelihood profile computation.
"""
# Dump header
print("Stacked likelihood profile computation:")
print("=======================================")
# Allocate empty observation container
obs = gammalib.GObservations()
# Allocate empty CTA observation
cta = gammalib.GCTAObservation(cntmap, expcube, psfcube)
# Append CTA observation to observation container
obs.append(cta)
# Load model
models = gammalib.GModels(model)
obs.models(models)
# Get start CPU time
tstart = time.clock()
# Compute likelihood profile
result = compute_likelihood_profile(obs)
# Get stop CPU time
tstop = time.clock()
telapsed = tstop - tstart
print(" Elapsed time ..............: %.3f sec" % telapsed)
# Show likelihood profile
show_likelihood_profile(result)
# Return
return
#==========================#
# Main routine entry point #
#==========================#
if __name__ == '__main__':
"""
Perform CTA likelihood profile computation.
"""
# Dump header
print("")
print("**************************************")
print("* CTA likelihood profile computation *")
print("**************************************")
# Set test
#test = "point"
#test = "gauss"
test = "disk"
#test = "ldisk"
#test = "shell"
#test = "ellipse"
#test = "diffuse"
# Set response parameters
irf = "cta_dummy_irf"
caldb = "../caldb"
#irf = "irf_file.fits"
#caldb = "../caldb/data/cta/e/bcf/IFAE20120510_50h"
expcube = "data/expcube.fits"
psfcube = "data/psfcube.fits"
# Set test dependent filenames
if test == "point":
model = "data/crab_ptsrc.xml"
events = "data/crab_events.fits"
cntmap = "data/crab_cntmap.fits"
cntref = 934.3 # +/- 3.1 (from ctobssim)
elif test == "disk":
model = "data/crab_disk.xml"
events = "data/crab_disk_events.fits"
cntmap = "data/crab_disk_cntmap.fits"
cntref = 933.6 # +/- 3.1 (from ctobssim)
elif test == "ldisk":
model = "crab_ldisk.xml"
events = "crab_ldisk_events.fits"
cntmap = "data/crab_disk_cntmap.fits"
cntref = 933.6 # +/- 3.1 (from ctobssim)
elif test == "gauss":
model = "data/crab_gauss.xml"
events = "data/crab_gauss_events.fits"
cntmap = "data/crab_gauss_cntmap.fits"
cntref = 935.1 # +/- 3.1 (from ctobssim)
elif test == "shell":
model = "data/crab_shell.xml"
events = "data/crab_shell_events.fits"
cntmap = "data/crab_shell_cntmap.fits"
cntref = 933.5 # +/- 3.1 (from ctobssim)
elif test == "ellipse":
model = "data/crab_edisk.xml"
events = "data/crab_edisk_events.fits"
cntmap = "data/crab_edisk_cntmap.fits"
cntref = 845.9 # +/- 2.9 (from ctobssim)
elif test == "diffuse":
model = "data/radio.xml"
events = "data/radio_events.fits"
cntmap = "data/radio_cntmap.fits"
cntref = 337.5 # +/- 1.8 (from ctobssim)
# Perform unbinned computation
#original_likelihood_profile(model, events, irf, gammalib.GCaldb(caldb), cntref)
# Perform binned computation
#original_likelihood_profile(model, cntmap, irf, gammalib.GCaldb(caldb), cntref)
# Perform stacked computation
stacked_likelihood_profile(model, cntmap, expcube, psfcube, cntref)
| gpl-3.0 |
thunderhoser/GewitterGefahr | gewittergefahr/scripts/compare_human_vs_machine_interpretn.py | 1 | 26120 | """Compares human-generated vs. machine-generated interpretation map.
This script handles 3 types of interpretation maps:
- saliency
- Grad-CAM
- guided Grad-CAM
"""
import os.path
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as pyplot
from gewittergefahr.gg_utils import human_polygons
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import saliency_maps
from gewittergefahr.deep_learning import gradcam
from gewittergefahr.deep_learning import training_validation_io as trainval_io
from gewittergefahr.plotting import plotting_utils
from gewittergefahr.plotting import radar_plotting
TOLERANCE = 1e-6
METRES_TO_KM = 0.001
TIME_FORMAT = '%Y-%m-%d-%H%M%S'
MARKER_TYPE = 'o'
MARKER_SIZE = 15
MARKER_EDGE_WIDTH = 1
MARKER_COLOUR = numpy.full(3, 0.)
HUMAN_STRING = 'H'
MACHINE_STRING = 'M'
OVERLAY_FONT_SIZE = 20
OVERLAY_FONT_COLOUR = numpy.full(3, 0.)
FIGURE_WIDTH_INCHES = 15
FIGURE_HEIGHT_INCHES = 15
FIGURE_RESOLUTION_DPI = 300
MACHINE_POSITIVE_MASK_KEY = 'machine_positive_mask_matrix_2d'
POSITIVE_IOU_KEY = 'positive_iou'
MACHINE_NEGATIVE_MASK_KEY = 'machine_negative_mask_matrix_2d'
NEGATIVE_IOU_KEY = 'negative_iou'
HUMAN_FILE_ARG_NAME = 'input_human_file_name'
MACHINE_FILE_ARG_NAME = 'input_machine_file_name'
GUIDED_GRADCAM_ARG_NAME = 'guided_gradcam_flag'
THRESHOLD_ARG_NAME = 'abs_percentile_threshold'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'
HUMAN_FILE_HELP_STRING = (
'Path to file with human-generated polygons. Will be read by '
'`human_polygons.read_polygons`.')
MACHINE_FILE_HELP_STRING = (
'Path to file with machine-generated interpretation map. Will be read by '
'`saliency_maps.read_standard_file`, `saliency_maps.read_pmm_file`, '
'`gradcam.read_pmm_file`, or `gradcam.read_pmm_file`.')
GUIDED_GRADCAM_HELP_STRING = (
'[used only if `{0:s}` contains Grad-CAM output] Boolean flag. If 1, will '
'compare human polygons with guided Grad-CAM. If 0, will compare with '
'simple Grad-CAM.'
).format(MACHINE_FILE_ARG_NAME)
THRESHOLD_HELP_STRING = (
'Threshold for interpretation quantity (I). Human polygons will be turned '
'into interpretation maps by assuming that (1) all grid points in a '
'positive polygon have I >= p, where p is the `{0:s}`th percentile of '
'positive values in the machine-generated map; and (2) all grid points '
'inside a negative polygon have I <= q, where q is the (100 - `{0:s}`)th '
'percentile of negative values in the machine-generated map. If you want '
'this to be set adaptively, leave the argument alone.'
).format(THRESHOLD_ARG_NAME)
OUTPUT_DIR_HELP_STRING = (
'Name of output directory (figures will be saved here).')
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + HUMAN_FILE_ARG_NAME, type=str, required=True,
help=HUMAN_FILE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + MACHINE_FILE_ARG_NAME, type=str, required=True,
help=MACHINE_FILE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + GUIDED_GRADCAM_ARG_NAME, type=int, required=False, default=0,
help=GUIDED_GRADCAM_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + THRESHOLD_ARG_NAME, type=float, required=False, default=-1.,
help=THRESHOLD_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
help=OUTPUT_DIR_HELP_STRING)
def _compute_iou(machine_mask_matrix, human_mask_matrix):
"""Computes IoU (intersection over union) between human and machine masks.
:param machine_mask_matrix: Boolean numpy array, representing areas of
extreme positive or negative interpretation values.
:param human_mask_matrix: Same but for human. The two numpy arrays must
have the same shape.
:return: iou: Intersection over union between the two masks.
"""
union_matrix = numpy.logical_or(machine_mask_matrix, human_mask_matrix)
intersection_matrix = numpy.logical_and(
machine_mask_matrix, human_mask_matrix)
return float(numpy.sum(intersection_matrix)) / numpy.sum(union_matrix)
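# Worked example (illustrative, not from the test suite): for
#     machine = [[True, True], [False, False]]
#     human   = [[True, False], [True, False]]
# the intersection has 1 True cell and the union has 3, so the IoU is 1 / 3.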
def _plot_comparison_one_channel(
machine_mask_matrix_3d, human_mask_matrix_3d, channel_index,
axes_object_matrix):
"""Plots human/machine comparison for one channel.
J = number of panel rows
K = number of panel columns
:param machine_mask_matrix_3d: See doc for `_plot_comparison`.
:param human_mask_matrix_3d: Same.
:param channel_index: Channel index. Will plot comparison for only this
channel.
:param axes_object_matrix: J-by-K numpy array of axes handles (instances
of `matplotlib.axes._subplots.AxesSubplot`).
"""
i, j = numpy.unravel_index(
channel_index, axes_object_matrix.shape, order='F'
)
this_axes_object = axes_object_matrix[i, j]
these_grid_rows, these_grid_columns = numpy.where(numpy.logical_and(
machine_mask_matrix_3d[..., channel_index],
human_mask_matrix_3d[..., channel_index]
))
these_grid_rows = these_grid_rows + 0.5
these_grid_columns = these_grid_columns + 0.5
if len(these_grid_rows) > 0:
marker_colour_as_tuple = plotting_utils.colour_from_numpy_to_tuple(
MARKER_COLOUR)
this_axes_object.plot(
these_grid_columns, these_grid_rows, linestyle='None',
marker=MARKER_TYPE, markersize=MARKER_SIZE,
markeredgewidth=MARKER_EDGE_WIDTH,
markerfacecolor=marker_colour_as_tuple,
markeredgecolor=marker_colour_as_tuple)
these_grid_rows, these_grid_columns = numpy.where(numpy.logical_and(
machine_mask_matrix_3d[..., channel_index],
numpy.invert(human_mask_matrix_3d[..., channel_index])
))
these_grid_rows = these_grid_rows + 0.5
these_grid_columns = these_grid_columns + 0.5
for k in range(len(these_grid_rows)):
this_axes_object.text(
these_grid_columns[k], these_grid_rows[k], MACHINE_STRING,
fontsize=OVERLAY_FONT_SIZE, color=OVERLAY_FONT_COLOUR,
fontweight='bold', horizontalalignment='center',
verticalalignment='center')
these_grid_rows, these_grid_columns = numpy.where(numpy.logical_and(
numpy.invert(machine_mask_matrix_3d[..., channel_index]),
human_mask_matrix_3d[..., channel_index]
))
these_grid_rows = these_grid_rows + 0.5
these_grid_columns = these_grid_columns + 0.5
for k in range(len(these_grid_rows)):
this_axes_object.text(
these_grid_columns[k], these_grid_rows[k], HUMAN_STRING,
fontsize=OVERLAY_FONT_SIZE, color=OVERLAY_FONT_COLOUR,
fontweight='bold', horizontalalignment='center',
verticalalignment='center')
def _plot_comparison(
predictor_matrix, model_metadata_dict, machine_mask_matrix_3d,
human_mask_matrix_3d, iou_by_channel, positive_flag, output_file_name):
"""Plots comparison between human and machine interpretation maps.
M = number of rows in grid (physical space)
N = number of columns in grid (physical space)
C = number of channels
:param predictor_matrix: M-by-N-by-C numpy array of predictors.
:param model_metadata_dict: Dictionary returned by
`cnn.read_model_metadata`.
:param machine_mask_matrix_3d: M-by-N-by-C numpy array of Boolean flags,
indicating where machine interpretation value is strongly positive or
negative.
:param human_mask_matrix_3d: Same.
:param iou_by_channel: length-C numpy array of IoU values (intersection over
union) between human and machine masks.
:param positive_flag: Boolean flag. If True (False), masks indicate where
interpretation value is strongly positive (negative).
:param output_file_name: Path to output file (figure will be saved here).
"""
training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
list_of_layer_operation_dicts = model_metadata_dict[
cnn.LAYER_OPERATIONS_KEY]
if list_of_layer_operation_dicts is None:
field_name_by_panel = training_option_dict[trainval_io.RADAR_FIELDS_KEY]
panel_names = radar_plotting.fields_and_heights_to_names(
field_names=field_name_by_panel,
heights_m_agl=training_option_dict[trainval_io.RADAR_HEIGHTS_KEY]
)
plot_colour_bar_by_panel = numpy.full(
len(field_name_by_panel), True, dtype=bool
)
else:
field_name_by_panel, panel_names = (
radar_plotting.layer_operations_to_names(
list_of_layer_operation_dicts=list_of_layer_operation_dicts
)
)
plot_colour_bar_by_panel = numpy.full(
len(field_name_by_panel), False, dtype=bool
)
plot_colour_bar_by_panel[2::3] = True
num_panels = len(field_name_by_panel)
num_panel_rows = int(numpy.floor(
numpy.sqrt(num_panels)
))
for k in range(num_panels):
panel_names[k] += '\n{0:s} IoU = {1:.3f}'.format(
'Positive' if positive_flag else 'Negative',
iou_by_channel[k]
)
axes_object_matrix = radar_plotting.plot_many_2d_grids_without_coords(
field_matrix=numpy.flip(predictor_matrix, axis=0),
field_name_by_panel=field_name_by_panel, panel_names=panel_names,
num_panel_rows=num_panel_rows,
plot_colour_bar_by_panel=plot_colour_bar_by_panel, font_size=14,
row_major=False
)[1]
for k in range(num_panels):
_plot_comparison_one_channel(
human_mask_matrix_3d=human_mask_matrix_3d,
machine_mask_matrix_3d=numpy.flip(machine_mask_matrix_3d, axis=0),
channel_index=k, axes_object_matrix=axes_object_matrix)
print('Saving figure to: "{0:s}"...'.format(output_file_name))
pyplot.savefig(output_file_name, dpi=FIGURE_RESOLUTION_DPI,
pad_inches=0., bbox_inches='tight')
pyplot.close()
def _reshape_human_maps(model_metadata_dict, positive_mask_matrix_4d,
negative_mask_matrix_4d):
"""Reshapes human interpretation maps to match machine interpretation maps.
M = number of rows in grid (physical space)
N = number of columns in grid (physical space)
J = number of panel rows
K = number of panel columns
C = J * K = number of channels
:param model_metadata_dict: Dictionary returned by
`cnn.read_model_metadata`.
:param positive_mask_matrix_4d: J-by-K-by-M-by-N numpy array of Boolean
flags.
:param negative_mask_matrix_4d: Same, except this may be None.
:return: positive_mask_matrix_3d: M-by-N-by-C numpy array of Boolean flags,
with values matching `positive_mask_matrix_4d`.
:return: negative_mask_matrix_3d: M-by-N-by-C numpy array of Boolean flags,
with values matching `negative_mask_matrix_4d`. If
`negative_mask_matrix_4d is None`, this is also None.
:raises: TypeError: if model performs 2-D and 3-D convolution.
:raises: ValueError: if number of channels in mask != number of input
channels to model (in the predictor matrix).
"""
if model_metadata_dict[cnn.CONV_2D3D_KEY]:
raise TypeError(
'This script cannot handle models that perform 2-D and 3-D '
'convolution.'
)
training_option_dict = model_metadata_dict[cnn.TRAINING_OPTION_DICT_KEY]
list_of_layer_operation_dicts = model_metadata_dict[
cnn.LAYER_OPERATIONS_KEY]
if list_of_layer_operation_dicts is None:
num_machine_channels = len(
training_option_dict[trainval_io.RADAR_FIELDS_KEY]
)
else:
num_machine_channels = len(list_of_layer_operation_dicts)
num_panel_rows = positive_mask_matrix_4d.shape[0]
num_panel_columns = positive_mask_matrix_4d.shape[1]
num_human_channels = num_panel_rows * num_panel_columns
if num_machine_channels != num_human_channels:
error_string = (
'Number of channels in human masks ({0:d}) != number of input '
'channels to model ({1:d}).'
).format(num_human_channels, num_machine_channels)
raise ValueError(error_string)
this_shape = positive_mask_matrix_4d.shape[2:] + (num_human_channels,)
positive_mask_matrix_3d = numpy.full(this_shape, False, dtype=bool)
if negative_mask_matrix_4d is None:
negative_mask_matrix_3d = None
else:
negative_mask_matrix_3d = numpy.full(this_shape, False, dtype=bool)
for k in range(num_human_channels):
this_panel_row, this_panel_column = numpy.unravel_index(
k, (num_panel_rows, num_panel_columns), order='F'
)
positive_mask_matrix_3d[..., k] = positive_mask_matrix_4d[
this_panel_row, this_panel_column, ...]
if negative_mask_matrix_3d is None:
continue
negative_mask_matrix_3d[..., k] = negative_mask_matrix_4d[
this_panel_row, this_panel_column, ...]
return positive_mask_matrix_3d, negative_mask_matrix_3d
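# Shape sketch for the reshape above (panel layout is an arbitrary example):
# with a 2 x 3 panel grid and 32 x 32 spatial grids, a (2, 3, 32, 32) human
# mask becomes a (32, 32, 6) mask whose channel k maps to panel
# numpy.unravel_index(k, (2, 3), order='F'), i.e. panels are traversed
# column-major (down each panel column first).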
def _do_comparison_one_channel(
machine_interpretation_matrix_2d, abs_percentile_threshold,
human_positive_mask_matrix_2d, human_negative_mask_matrix_2d=None):
"""Compares human and machine masks for one channel.
M = number of rows in grid (physical space)
N = number of columns in grid (physical space)
:param machine_interpretation_matrix_2d: M-by-N numpy array of
interpretation values (floats).
:param abs_percentile_threshold: See documentation at top of file. This
will be used to turn `machine_interpretation_matrix_2d` into one or two
masks.
:param human_positive_mask_matrix_2d: M-by-N numpy array of Boolean flags,
indicating where the human thinks the interpretation value is strongly
POSITIVE.
:param human_negative_mask_matrix_2d: M-by-N numpy array of Boolean flags,
indicating where the human thinks the interpretation value is strongly
NEGATIVE. This may be None.
:return: comparison_dict: Dictionary with the following keys.
comparison_dict['machine_positive_mask_matrix_2d']: Same as
`human_positive_mask_matrix_2d` but for the machine.
comparison_dict['positive_iou']: IoU (intersection over union) between
positive masks for human and machine.
comparison_dict['machine_negative_mask_matrix_2d']: Same as
`human_negative_mask_matrix_2d` but for the machine. If
`human_negative_mask_matrix_2d is None`, this is None.
comparison_dict['negative_iou']: IoU (intersection over union) between
negative masks for human and machine. If
`human_negative_mask_matrix_2d is None`, this is None.
"""
if abs_percentile_threshold is None:
this_percentile_threshold = 100 * (
1 - numpy.mean(human_positive_mask_matrix_2d)
)
else:
this_percentile_threshold = abs_percentile_threshold + 0.
print(this_percentile_threshold)
if numpy.any(machine_interpretation_matrix_2d > 0):
positive_threshold = numpy.percentile(
machine_interpretation_matrix_2d[
machine_interpretation_matrix_2d > 0],
this_percentile_threshold
)
else:
positive_threshold = TOLERANCE + 0.
machine_positive_mask_matrix_2d = (
machine_interpretation_matrix_2d >= positive_threshold
)
positive_iou = _compute_iou(
machine_mask_matrix=machine_positive_mask_matrix_2d,
human_mask_matrix=human_positive_mask_matrix_2d)
comparison_dict = {
MACHINE_POSITIVE_MASK_KEY: machine_positive_mask_matrix_2d,
POSITIVE_IOU_KEY: positive_iou,
MACHINE_NEGATIVE_MASK_KEY: None,
NEGATIVE_IOU_KEY: None
}
if human_negative_mask_matrix_2d is None:
return comparison_dict
if abs_percentile_threshold is None:
this_percentile_threshold = (
100 * numpy.mean(human_negative_mask_matrix_2d)
)
else:
this_percentile_threshold = 100. - abs_percentile_threshold
print(this_percentile_threshold)
if numpy.any(machine_interpretation_matrix_2d < 0):
negative_threshold = numpy.percentile(
machine_interpretation_matrix_2d[
machine_interpretation_matrix_2d < 0],
this_percentile_threshold
)
else:
negative_threshold = -1 * TOLERANCE
machine_negative_mask_matrix_2d = (
machine_interpretation_matrix_2d <= negative_threshold
)
negative_iou = _compute_iou(
machine_mask_matrix=machine_negative_mask_matrix_2d,
human_mask_matrix=human_negative_mask_matrix_2d)
comparison_dict[MACHINE_NEGATIVE_MASK_KEY] = machine_negative_mask_matrix_2d
comparison_dict[NEGATIVE_IOU_KEY] = negative_iou
return comparison_dict
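# Threshold sketch (illustrative numbers): if the human marked 10% of the grid
# as strongly positive and abs_percentile_threshold is None, the code above
# uses the 90th percentile of the machine's positive values, i.e. the machine
# mask keeps the top 10% of its positive interpretation values before the IoU
# is computed.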
def _run(input_human_file_name, input_machine_file_name, guided_gradcam_flag,
abs_percentile_threshold, output_dir_name):
"""Compares human-generated vs. machine-generated interpretation map.
This is effectively the main method.
:param input_human_file_name: See documentation at top of file.
:param input_machine_file_name: Same.
:param guided_gradcam_flag: Same.
:param abs_percentile_threshold: Same.
:param output_dir_name: Same.
"""
file_system_utils.mkdir_recursive_if_necessary(
directory_name=output_dir_name)
if abs_percentile_threshold < 0:
abs_percentile_threshold = None
if abs_percentile_threshold is not None:
error_checking.assert_is_leq(abs_percentile_threshold, 100.)
print('Reading data from: "{0:s}"...'.format(input_human_file_name))
human_polygon_dict = human_polygons.read_polygons(input_human_file_name)
human_positive_mask_matrix_4d = human_polygon_dict[
human_polygons.POSITIVE_MASK_MATRIX_KEY]
human_negative_mask_matrix_4d = human_polygon_dict[
human_polygons.NEGATIVE_MASK_MATRIX_KEY]
full_storm_id_string = human_polygon_dict[human_polygons.STORM_ID_KEY]
storm_time_unix_sec = human_polygon_dict[human_polygons.STORM_TIME_KEY]
pmm_flag = full_storm_id_string is None and storm_time_unix_sec is None
print('Reading data from: "{0:s}"...'.format(input_machine_file_name))
# TODO(thunderhoser): This is a HACK.
machine_channel_indices = numpy.array([2, 8], dtype=int)
if pmm_flag:
try:
saliency_dict = saliency_maps.read_pmm_file(input_machine_file_name)
saliency_flag = True
model_file_name = saliency_dict[saliency_maps.MODEL_FILE_KEY]
predictor_matrix = saliency_dict.pop(
saliency_maps.MEAN_INPUT_MATRICES_KEY
)[0][..., machine_channel_indices]
machine_interpretation_matrix_3d = saliency_dict.pop(
saliency_maps.MEAN_SALIENCY_MATRICES_KEY
)[0][..., machine_channel_indices]
except ValueError:
gradcam_dict = gradcam.read_pmm_file(input_machine_file_name)
saliency_flag = False
model_file_name = gradcam_dict[gradcam.MODEL_FILE_KEY]
predictor_matrix = gradcam_dict.pop(
gradcam.MEAN_INPUT_MATRICES_KEY
)[0][..., machine_channel_indices]
if guided_gradcam_flag:
machine_interpretation_matrix_3d = gradcam_dict.pop(
gradcam.MEAN_GUIDED_GRADCAM_KEY
)[..., machine_channel_indices]
else:
machine_interpretation_matrix_3d = gradcam_dict.pop(
gradcam.MEAN_CLASS_ACTIVATIONS_KEY)
else:
try:
saliency_dict = saliency_maps.read_standard_file(
input_machine_file_name)
saliency_flag = True
all_full_id_strings = saliency_dict[saliency_maps.FULL_IDS_KEY]
all_times_unix_sec = saliency_dict[saliency_maps.STORM_TIMES_KEY]
model_file_name = saliency_dict[saliency_maps.MODEL_FILE_KEY]
predictor_matrix = saliency_dict.pop(
saliency_maps.INPUT_MATRICES_KEY
)[0][..., machine_channel_indices]
machine_interpretation_matrix_3d = saliency_dict.pop(
saliency_maps.SALIENCY_MATRICES_KEY
)[0][..., machine_channel_indices]
except ValueError:
gradcam_dict = gradcam.read_standard_file(input_machine_file_name)
saliency_flag = False
all_full_id_strings = gradcam_dict[gradcam.FULL_IDS_KEY]
all_times_unix_sec = gradcam_dict[gradcam.STORM_TIMES_KEY]
model_file_name = gradcam_dict[gradcam.MODEL_FILE_KEY]
predictor_matrix = gradcam_dict.pop(
gradcam.INPUT_MATRICES_KEY
)[0][..., machine_channel_indices]
if guided_gradcam_flag:
machine_interpretation_matrix_3d = gradcam_dict.pop(
gradcam.GUIDED_GRADCAM_KEY
)[..., machine_channel_indices]
else:
machine_interpretation_matrix_3d = gradcam_dict.pop(
gradcam.CLASS_ACTIVATIONS_KEY)
storm_object_index = tracking_utils.find_storm_objects(
all_id_strings=all_full_id_strings,
all_times_unix_sec=all_times_unix_sec,
id_strings_to_keep=[full_storm_id_string],
times_to_keep_unix_sec=numpy.array(
[storm_time_unix_sec], dtype=int
),
allow_missing=False
)[0]
predictor_matrix = predictor_matrix[storm_object_index, ...]
machine_interpretation_matrix_3d = machine_interpretation_matrix_3d[
storm_object_index, ...]
if not saliency_flag and not guided_gradcam_flag:
machine_interpretation_matrix_3d = numpy.expand_dims(
machine_interpretation_matrix_3d, axis=-1)
machine_interpretation_matrix_3d = numpy.repeat(
a=machine_interpretation_matrix_3d,
repeats=predictor_matrix.shape[-1], axis=-1)
if not (saliency_flag or guided_gradcam_flag):
human_negative_mask_matrix_4d = None
model_metafile_name = '{0:s}/model_metadata.p'.format(
os.path.split(model_file_name)[0]
)
print('Reading metadata from: "{0:s}"...'.format(model_metafile_name))
model_metadata_dict = cnn.read_model_metadata(model_metafile_name)
model_metadata_dict[cnn.LAYER_OPERATIONS_KEY] = [
model_metadata_dict[cnn.LAYER_OPERATIONS_KEY][k]
for k in machine_channel_indices
]
human_positive_mask_matrix_3d, human_negative_mask_matrix_3d = (
_reshape_human_maps(
model_metadata_dict=model_metadata_dict,
positive_mask_matrix_4d=human_positive_mask_matrix_4d,
negative_mask_matrix_4d=human_negative_mask_matrix_4d)
)
num_channels = human_positive_mask_matrix_3d.shape[-1]
machine_positive_mask_matrix_3d = numpy.full(
human_positive_mask_matrix_3d.shape, False, dtype=bool)
positive_iou_by_channel = numpy.full(num_channels, numpy.nan)
if human_negative_mask_matrix_3d is None:
machine_negative_mask_matrix_3d = None
negative_iou_by_channel = None
else:
machine_negative_mask_matrix_3d = numpy.full(
human_negative_mask_matrix_3d.shape, False, dtype=bool)
negative_iou_by_channel = numpy.full(num_channels, numpy.nan)
for k in range(num_channels):
this_negative_matrix = (
None if human_negative_mask_matrix_3d is None
else human_negative_mask_matrix_3d[..., k]
)
this_comparison_dict = _do_comparison_one_channel(
machine_interpretation_matrix_2d=machine_interpretation_matrix_3d[
..., k],
abs_percentile_threshold=abs_percentile_threshold,
human_positive_mask_matrix_2d=human_positive_mask_matrix_3d[..., k],
human_negative_mask_matrix_2d=this_negative_matrix)
machine_positive_mask_matrix_3d[..., k] = this_comparison_dict[
MACHINE_POSITIVE_MASK_KEY]
positive_iou_by_channel[k] = this_comparison_dict[POSITIVE_IOU_KEY]
if human_negative_mask_matrix_3d is None:
continue
machine_negative_mask_matrix_3d[..., k] = this_comparison_dict[
MACHINE_NEGATIVE_MASK_KEY]
negative_iou_by_channel[k] = this_comparison_dict[NEGATIVE_IOU_KEY]
this_file_name = '{0:s}/positive_comparison.jpg'.format(output_dir_name)
_plot_comparison(
predictor_matrix=predictor_matrix,
model_metadata_dict=model_metadata_dict,
machine_mask_matrix_3d=machine_positive_mask_matrix_3d,
human_mask_matrix_3d=human_positive_mask_matrix_3d,
iou_by_channel=positive_iou_by_channel,
positive_flag=True, output_file_name=this_file_name)
if human_negative_mask_matrix_3d is None:
return
this_file_name = '{0:s}/negative_comparison.jpg'.format(output_dir_name)
_plot_comparison(
predictor_matrix=predictor_matrix,
model_metadata_dict=model_metadata_dict,
machine_mask_matrix_3d=machine_negative_mask_matrix_3d,
human_mask_matrix_3d=human_negative_mask_matrix_3d,
iou_by_channel=negative_iou_by_channel,
positive_flag=False, output_file_name=this_file_name)
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
input_human_file_name=getattr(INPUT_ARG_OBJECT, HUMAN_FILE_ARG_NAME),
input_machine_file_name=getattr(
INPUT_ARG_OBJECT, MACHINE_FILE_ARG_NAME),
guided_gradcam_flag=bool(getattr(
INPUT_ARG_OBJECT, GUIDED_GRADCAM_ARG_NAME)),
abs_percentile_threshold=getattr(INPUT_ARG_OBJECT, THRESHOLD_ARG_NAME),
output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)
)
| mit |
ronojoy/BDA_py_demos | demos_ch10/demo10_2.py | 19 | 1606 | """Bayesian data analysis
Chapter 10, demo 2
Importance sampling example
"""
from __future__ import division
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
# edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2, markeredgewidth=0)
plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a',
'#984ea3','#ff7f00','#ffff33'))
plt.rc('patch', facecolor='#bfe2ff')
# fake interesting distribution
x = np.linspace(-3, 3, 200)
r = np.array([ 1.1 , 1.3 , -0.1 , -0.7 , 0.2 , -0.4 , 0.06, -1.7 ,
1.7 , 0.3 , 0.7 , 1.6 , -2.06, -0.74, 0.2 , 0.5 ])
# Estimate the density (named q, to emphasize that it does not need to be
# normalized). Parameter bw_method=0.48 is used to mimic the outcome of the
# kernelp function in Matlab.
q_func = stats.gaussian_kde(r, bw_method=0.48)
q = q_func.evaluate(x)
# importance sampling example
g = stats.norm.pdf(x)
w = q/g
r = np.random.randn(100)
r = r[np.abs(r) < 3] # remove samples out of the grid
wr = q_func.evaluate(r)/stats.norm.pdf(r)
# plot
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(10,8))
axes[0].plot(x, q, label=r'$q(\theta|y)$')
axes[0].plot(x, g, label=r'$g(\theta)$')
axes[0].set_yticks(())
axes[0].set_title('target and proposal distributions')
axes[0].legend()
axes[1].plot(x, w, label=r'$q(\theta|y)/g(\theta)$')
axes[1].set_title('samples and importance weights')
axes[1].vlines(r, 0, wr, color='#377eb8', alpha=0.4)
axes[1].set_ylim((0,axes[1].get_ylim()[1]))
axes[1].legend()
plt.show()
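# --- Illustrative extension (not part of the original demo) -----------------
# The weights wr computed above can be turned into a self-normalized
# importance-sampling estimate of a posterior expectation, e.g. E[theta]:
#
#     is_estimate = np.sum(wr * r) / np.sum(wr)
#
# where r are the draws from the proposal g and wr = q(r)/g(r) are the
# unnormalized importance weights.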
| gpl-3.0 |
Adai0808/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
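# --- Illustrative extension (not part of the original example) --------------
# A simple quantitative comparison of the two strategies is the squared error
# between each estimated weight map and the true weights:
#
#     err_selection = np.sum((coef - coef_selection_) ** 2)
#     err_agglomeration = np.sum((coef - coef_agglomeration_) ** 2)
#     print("selection error: %.3f, agglomeration error: %.3f"
#           % (err_selection, err_agglomeration))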
| bsd-3-clause |
JT5D/scikit-learn | sklearn/metrics/cluster/bicluster/tests/test_bicluster_metrics.py | 13 | 1145 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal
from ..bicluster_metrics import _jaccard
from ..bicluster_metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
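# Worked example (added note, not part of the original test module):
# _jaccard(a1, a1, a2, a2) compares bicluster A = (rows a1, cols a1), a 2 x 2
# block of 4 cells, with bicluster B = (rows a2, cols a2), the full 4 x 4 block
# of 16 cells.  Their intersection has 4 cells and their union 16, so the
# Jaccard index is 4 / 16 = 0.25, matching the assertion above.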
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
| bsd-3-clause |
alshedivat/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/__init__.py | 42 | 2656 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.numpy_io import numpy_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.generator_io import generator_input_fn
| apache-2.0 |
djgagne/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
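# --- Hedged usage sketch (not part of the original test module) -------------
# The estimators exercised above can be used for simple outlier flagging:
#
#     from sklearn.covariance import MinCovDet
#     import numpy as np
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(200, 3)
#     X[:10] += 6.                        # contaminate a few rows
#     mcd = MinCovDet(random_state=0).fit(X)
#     d2 = mcd.mahalanobis(X)             # squared robust Mahalanobis distances
#     outliers = d2 > np.percentile(d2, 95)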
| bsd-3-clause |
AlirezaShahabi/zipline | zipline/assets/futures.py | 15 | 5311 | from pandas import Timestamp, Timedelta
from pandas.tseries.tools import normalize_date
class FutureChain(object):
""" Allows users to look up future contracts.
Parameters
----------
asset_finder : AssetFinder
An AssetFinder for future contract lookups, in particular the
AssetFinder of the TradingAlgorithm instance.
get_datetime : function
A function that returns the simulation datetime, in particular
the get_datetime method of the TradingAlgorithm instance.
root_symbol : str
The root symbol of a future chain.
as_of_date : pandas.Timestamp, optional
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this date is
the primary contract, etc. If not provided, the current
simulation date is used as the as_of_date.
Attributes
----------
root_symbol : str
The root symbol of the future chain.
as_of_date
The current as-of date of this future chain.
Methods
-------
as_of(dt)
offset(time_delta)
Raises
------
RootSymbolNotFound
Raised when the FutureChain is initialized with a root symbol for which
a future chain could not be found.
"""
def __init__(self, asset_finder, get_datetime, root_symbol,
as_of_date=None):
self.root_symbol = root_symbol
# Reference to the algo's AssetFinder for contract lookups
self._asset_finder = asset_finder
# Reference to the algo's get_datetime to know the current dt
self._algorithm_get_datetime = get_datetime
# If an as_of_date is provided, self._as_of_date uses that
# value, otherwise None. This attribute backs the as_of_date property.
if as_of_date:
self._as_of_date = normalize_date(Timestamp(as_of_date, tz='UTC'))
else:
self._as_of_date = None
# Attribute to cache the most up-to-date chain, and the dt when it was
# last updated.
self._current_chain = []
self._last_updated = None
# Get the initial chain, since self._last_updated is None.
self._maybe_update_current_chain()
def __repr__(self):
# NOTE: The string returned cannot be used to instantiate this
# exact FutureChain, since we don't want to display the asset
# finder and get_datetime function to the user.
if self._as_of_date:
return "FutureChain(root_symbol='%s', as_of_date='%s')" % (
self.root_symbol, self.as_of_date)
else:
return "FutureChain(root_symbol='%s')" % self.root_symbol
def _get_datetime(self):
"""
Returns the normalized simulation datetime.
Returns
-------
pandas.Timestamp
The normalized datetime of FutureChain's TradingAlgorithm.
"""
return normalize_date(
Timestamp(self._algorithm_get_datetime(), tz='UTC')
)
@property
def as_of_date(self):
"""
The current as-of date of this future chain.
Returns
-------
pandas.Timestamp
The user-provided as_of_date if given, otherwise the
current datetime of the simulation.
"""
if self._as_of_date is not None:
return self._as_of_date
else:
return self._get_datetime()
def _maybe_update_current_chain(self):
""" Updates the current chain if it's out of date, then returns
it.
Returns
-------
list
The up-to-date current chain, a list of Future objects.
"""
dt = self._get_datetime()
if (self._last_updated is None) or (self._last_updated != dt):
self._current_chain = self._asset_finder.lookup_future_chain(
self.root_symbol,
self.as_of_date,
dt
)
self._last_updated = dt
return self._current_chain
def __getitem__(self, key):
return self._maybe_update_current_chain()[key]
def __len__(self):
return len(self._maybe_update_current_chain())
def __iter__(self):
return iter(self._maybe_update_current_chain())
def as_of(self, dt):
""" Get the future chain for this root symbol as of a specific date.
Parameters
----------
dt : datetime.datetime or pandas.Timestamp or str, optional
The as_of_date for the new chain.
Returns
-------
FutureChain
"""
return FutureChain(
asset_finder=self._asset_finder,
get_datetime=self._algorithm_get_datetime,
root_symbol=self.root_symbol,
as_of_date=dt
)
def offset(self, time_delta):
""" Get the future chain for this root symbol with a given
offset from the current as_of_date.
Parameters
----------
time_delta : datetime.timedelta or pandas.Timedelta or str
The offset from the current as_of_date for the new chain.
Returns
-------
FutureChain
"""
return self.as_of(self.as_of_date + Timedelta(time_delta))
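# --- Hedged usage sketch (not part of the original module) ------------------
# Inside a TradingAlgorithm a chain is typically built from the algorithm's own
# asset finder and clock (the attribute names below are illustrative):
#
#     chain = FutureChain(asset_finder=algo.asset_finder,
#                         get_datetime=algo.get_datetime,
#                         root_symbol='CL')
#     front = chain[0]                     # primary (front) contract
#     later = chain.offset('90 days')      # chain rooted 90 days ahead
#     past = chain.as_of('2015-01-02')     # chain as of a fixed date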
| apache-2.0 |
LABSN/expyfun | expyfun/analyze/_viz.py | 2 | 22433 | """Analysis visualization functions
"""
import numpy as np
from itertools import chain
from .._utils import string_types
def format_pval(pval, latex=True, scheme='default'):
"""Format a p-value using one of several schemes.
Parameters
----------
pval : float | array-like
The raw p-value(s).
latex : bool
Whether to use LaTeX wrappers suitable for use with matplotlib.
scheme : str
A keyword indicating the formatting scheme. Currently supports "stars",
"ross", and "default"; any other string will yield the same as
"default".
Returns
-------
pv : str | np.objectarray
A string or array of strings of formatted p-values. If a list output is
preferred, users may call ``.tolist()`` on the output of the function.
"""
single_value = False
if np.array(pval).shape == ():
single_value = True
pval = np.atleast_1d(np.asanyarray(pval))
# add a tiny amount to handle cases where p is exactly a power of ten
pval = pval + np.finfo(pval.dtype).eps
expon = np.trunc(np.log10(pval)).astype(int) # exponents
pv = np.zeros_like(pval, dtype=object)
if latex:
wrap = '$'
brk_l = '{{'
brk_r = '}}'
else:
wrap = ''
brk_l = ''
brk_r = ''
if scheme == 'ross': # (exact value up to 4 decimal places)
pv[pval >= 0.0001] = [wrap + 'p = {:.4f}'.format(x) + wrap
                              for x in pval[pval >= 0.0001]]
pv[pval < 0.0001] = [wrap + 'p < 10^' + brk_l + '{}'.format(x) +
brk_r + wrap for x in expon[pval < 0.0001]]
elif scheme == 'stars':
star = '{*}' if latex else '*'
pv[pval >= 0.05] = wrap + '' + wrap
pv[pval < 0.05] = wrap + star + wrap
pv[pval < 0.01] = wrap + star * 2 + wrap
pv[pval < 0.001] = wrap + star * 3 + wrap
else: # scheme == 'default'
pv[pval >= 0.05] = wrap + 'n.s.' + wrap
pv[pval < 0.05] = wrap + 'p < 0.05' + wrap
pv[pval < 0.01] = wrap + 'p < 0.01' + wrap
pv[pval < 0.001] = wrap + 'p < 0.001' + wrap
pv[pval < 0.0001] = [wrap + 'p < 10^' + brk_l + '{}'.format(x) +
brk_r + wrap for x in expon[pval < 0.0001]]
if single_value:
pv = pv[0]
return(pv)
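# Hedged usage sketch (not part of the original module):
#
#     format_pval(0.003)                      # -> '$p < 0.01$'
#     format_pval(0.03, latex=False)          # -> 'p < 0.05'
#     format_pval([0.2, 0.0004], latex=False).tolist()
#                                             # -> ['n.s.', 'p < 0.001']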
def _instantiate(obj, typ):
"""Returns obj if obj is not None, else returns new instance of typ
obj : an object
An object (most likely one that a user passed into a function) that,
if ``None``, should be initiated as an empty object of some other type.
typ : an object type
Expected values are list, dict, int, bool, etc.
"""
return typ() if obj is None else obj
def barplot(h, axis=-1, ylim=None, err_bars=None, lines=False,
groups=None, eq_group_widths=False, gap_size=0.2,
brackets=None, bracket_text=None, bracket_inline=False,
bracket_group_lines=False, bar_names=None, group_names=None,
bar_kwargs=None, err_kwargs=None, line_kwargs=None,
bracket_kwargs=None, pval_kwargs=None, figure_kwargs=None,
smart_defaults=True, fname=None, ax=None):
"""Makes barplots w/ optional line overlays, grouping, & signif. brackets.
Parameters
----------
h : array-like
If `h` is 2-dimensional, heights will be calculated as means along
the axis given by `axis`. If `h` is of lower dimension, it is
treated as raw height values. If `h` is a `pandas.DataFrame` and
`bar_names` is ``None``, `bar_names` will be inferred from the
DataFrame's `column` labels (if ``axis=0``) or `index` labels.
axis : int
The axis along which to calculate mean values to determine bar heights.
Ignored if `h` is 0- or 1-dimensional.
ylim : tuple | None
y-axis limits passed to `matplotlib.pyplot.subplot.set_ylim`.
err_bars : str | array-like | None
Type of error bars to be added to the barplot. Possible values are
``'sd'`` for sample standard deviation, ``'se'`` for standard error of
the mean, or ``'ci'`` for 95% confidence interval. If ``None``, no
error bars will be plotted. Custom error bar heights are possible by
passing an array-like object; in such cases `err_bars` must have the
same dimensionality and shape as `h`.
lines : bool
Whether to plot within-subject data as lines overlaid on the barplot.
groups : list | None
List of lists containing the integers in ``range(num_bars)``, with
sub-lists indicating the desired grouping. For example, if `h` has
has shape (10, 4) and ``axis = -1`` then "num_bars" is 4; if you want
the first bar isolated and the remaining three grouped, then specify
``groups=[[0], [1, 2, 3]]``.
eq_group_widths : bool
Should all groups have the same width? If ``False``, all bars will have
the same width. Ignored if `groups` is ``None``, since the bar/group
distinction is meaningless in that case.
gap_size : float
Width of the gap between groups (if `eq_group_width` is ``True``) or
between bars, expressed as a proportion [0,1) of group or bar width.
Half the width of `gap_size` will be added between the outermost bars
and the plot edges.
brackets : list of tuples | None
Location of significance brackets. Scheme is similar to the
specification of `groups`; a bracket between the first and second bar
and another between the third and fourth bars would be specified as
``brackets=[(0, 1), (2, 3)]``. Brackets between groups of bars instead
of individual bars are specified as lists within the tuple:
``brackets=[([0, 1], [2, 3])]`` draws a single bracket between group
``[0, 1]`` and group ``[2, 3]``. For best results, pairs of adjacent
bars should come earlier in the list than non-adjacent pairs.
bracket_text : str | list | None
Text to display above brackets.
bracket_inline : bool
If ``True``, bracket text will be vertically centered along a broken
bracket line. If ``False``, text will be above the line.
bracket_group_lines : bool
When drawing brackets between groups rather than single bars, should a
horizontal line be drawn at each foot of the bracket to indicate this?
bar_names : array-like | None
Optional axis labels for each bar.
group_names : array-like | None
Optional axis labels for each group.
bar_kwargs : dict
Arguments passed to ``matplotlib.pyplot.bar()`` (ex: color, linewidth).
err_kwargs : dict
Arguments passed to ``matplotlib.pyplot.bar(error_kw)`` (ex: ecolor,
capsize).
line_kwargs : dict
Arguments passed to ``matplotlib.pyplot.plot()`` (e.g., color, marker,
linestyle).
bracket_kwargs : dict
arguments passed to ``matplotlib.pyplot.plot()`` (e.g., color, marker,
linestyle).
pval_kwargs : dict
Arguments passed to ``matplotlib.pyplot.annotate()`` when drawing
bracket labels.
figure_kwargs : dict
arguments passed to ``matplotlib.pyplot.figure()`` (e.g., figsize, dpi,
frameon).
smart_defaults : bool
Whether to use pyplot default colors (``False``), or something more
pleasing to the eye (``True``).
fname : str | None
Path and name of output file. File type is inferred from the file
extension of `fname` and should work for any of the types supported by
pyplot (pdf, eps, svg, png, raw).
ax : matplotlib.pyplot.axes | None
A ``matplotlib.pyplot.axes`` instance. If ``None``, a new figure with
a single subplot will be created.
Returns
-------
p : handle for the ``matplotlib.pyplot.subplot`` instance.
b : handle for the ``matplotlib.pyplot.bar`` instance.
Notes
-----
Known limitations:
1 Bracket heights don't get properly set when generating multiple
subplots with ``sharey=True`` (matplotlib seems to temporarily force
the ``ylim`` to +/- 0.6 in this case). Work around is to use
``sharey=False`` and manually set ``ylim`` for each subplot.
2 Brackets that span groups cannot span partial groups. For example,
if ``groups=[[0, 1, 2], [3, 4]]`` it is impossible to have a bracket
at ``[(0, 1), (3, 4)]``... it is only possible to do, e.g.,
``[0, (3, 4)]`` (single bar vs group) or ``[(0, 1, 2), (3, 4)]``
(full group vs full group).
3 Bracket drawing is much better when adjacent pairs of bars are
specified before non-adjacent pairs of bars.
Smart defaults sets the following parameters:
bar color: light gray (70%)
error bar color: black
line color: black
bracket color: dark gray (30%)
"""
from matplotlib import pyplot as plt, rcParams
try:
from pandas.core.frame import DataFrame
except Exception:
DataFrame = None
# be nice to pandas
if DataFrame is not None:
if isinstance(h, DataFrame) and bar_names is None:
bar_names = h.columns.tolist() if axis == 0 else h.index.tolist()
# check arg errors
if gap_size < 0 or gap_size >= 1:
raise ValueError('Barplot argument "gap_size" must be in the range '
'[0, 1).')
if err_bars is not None:
if isinstance(err_bars, string_types) and \
err_bars not in ['sd', 'se', 'ci']:
raise ValueError('err_bars must be "sd", "se", or "ci" (or an '
'array of error bar magnitudes).')
if brackets is not None:
if any([len(x) != 2 for x in brackets]):
raise ValueError('Each top-level element of brackets must have '
'length 2.')
if not len(brackets) == len(bracket_text):
raise ValueError('Mismatch between number of brackets and bracket '
'labels.')
# handle single-element args
if isinstance(bracket_text, string_types):
bracket_text = [bracket_text]
if isinstance(group_names, string_types):
group_names = [group_names]
# arg defaults: if arg is None, instantiate as given type
brackets = _instantiate(brackets, list)
bar_kwargs = _instantiate(bar_kwargs, dict)
err_kwargs = _instantiate(err_kwargs, dict)
line_kwargs = _instantiate(line_kwargs, dict)
pval_kwargs = _instantiate(pval_kwargs, dict)
figure_kwargs = _instantiate(figure_kwargs, dict)
bracket_kwargs = _instantiate(bracket_kwargs, dict)
# user-supplied Axes
if ax is not None:
bar_kwargs['axes'] = ax
# smart defaults
if smart_defaults:
if 'color' not in bar_kwargs.keys():
bar_kwargs['color'] = '0.7'
if 'color' not in line_kwargs.keys():
line_kwargs['color'] = 'k'
if 'ecolor' not in err_kwargs.keys():
err_kwargs['ecolor'] = 'k'
if 'color' not in bracket_kwargs.keys():
bracket_kwargs['color'] = '0.3'
# fix bar alignment (defaults to 'center' in more recent versions of MPL)
if 'align' not in bar_kwargs.keys():
bar_kwargs['align'] = 'edge'
# parse heights
h = np.array(h)
if len(h.shape) > 2:
raise ValueError('Barplot "h" must have 2 or fewer dimensions.')
heights = np.atleast_1d(h) if h.ndim < 2 else h.mean(axis=axis)
# grouping
num_bars = len(heights)
if groups is None:
groups = [[x] for x in range(num_bars)]
groups = [list(x) for x in groups] # forgive list/tuple mix-ups
# calculate bar positions
non_gap = 1 - gap_size
offset = gap_size / 2.
if eq_group_widths:
group_sizes = np.array([float(len(_grp)) for _grp in groups], int)
group_widths = [non_gap for _ in groups]
group_edges = [offset + _ix for _ix in range(len(groups))]
group_ixs = list(chain.from_iterable([range(x) for x in group_sizes]))
bar_widths = np.repeat(np.array(group_widths) / group_sizes,
group_sizes).tolist()
bar_edges = (np.repeat(group_edges, group_sizes) +
bar_widths * np.array(group_ixs)).tolist()
else:
bar_widths = [[non_gap for _ in _grp] for _grp in groups]
# next line: offset + cumul. gap widths + cumul. bar widths
bar_edges = [[offset + _ix * gap_size + _bar * non_gap
for _bar in _grp] for _ix, _grp in enumerate(groups)]
group_widths = [np.sum(_width) for _width in bar_widths]
group_edges = [_edge[0] for _edge in bar_edges]
bar_edges = list(chain.from_iterable(bar_edges))
bar_widths = list(chain.from_iterable(bar_widths))
bar_centers = np.array(bar_edges) + np.array(bar_widths) / 2.
group_centers = np.array(group_edges) + np.array(group_widths) / 2.
# calculate error bars
err = np.zeros(num_bars) # default if no err_bars
if err_bars is not None:
if h.ndim == 2:
if err_bars == 'sd': # sample standard deviation
err = h.std(axis)
elif err_bars == 'se': # standard error
err = h.std(axis) / np.sqrt(h.shape[axis])
else: # 95% conf int
err = 1.96 * h.std(axis) / np.sqrt(h.shape[axis])
else: # h.ndim == 1
if isinstance(err_bars, string_types):
raise ValueError('string arguments to "err_bars" ignored when '
'"h" has fewer than 2 dimensions.')
elif not h.shape == np.array(err_bars).shape:
raise ValueError('When "err_bars" is array-like it must have '
'the same shape as "h".')
err = np.atleast_1d(err_bars)
bar_kwargs['yerr'] = err
# plot (bars and error bars)
if ax is None:
plt.figure(**figure_kwargs)
p = plt.subplot(111)
else:
p = ax
b = p.bar(bar_edges, heights, bar_widths, error_kw=err_kwargs,
**bar_kwargs)
# plot within-subject lines
if lines:
_h = h if axis == 0 else h.T
xy = [(bar_centers, hts) for hts in _h]
for subj in xy:
p.plot(subj[0], subj[1], **line_kwargs)
# draw significance brackets
if len(brackets):
brackets = [tuple(x) for x in brackets] # forgive list/tuple mix-ups
brk_offset = np.diff(p.get_ylim()) * 0.025
brk_min_h = np.diff(p.get_ylim()) * 0.05
# temporarily plot a textbox to get its height
t = plt.annotate(bracket_text[0], (0, 0), **pval_kwargs)
t.set_bbox(dict(boxstyle='round, pad=0.25'))
plt.draw()
bb = t.get_bbox_patch().get_window_extent()
txth = np.diff(p.transData.inverted().transform(bb),
axis=0).ravel()[-1]
if bracket_inline:
txth = txth / 2.
t.remove()
# find highest points
if lines and h.ndim == 2: # brackets must be above lines & error bars
apex = np.amax(np.r_[np.atleast_2d(heights + err),
np.atleast_2d(np.amax(h, axis))], axis=0)
else:
apex = np.atleast_1d(heights + err)
apex = np.maximum(apex, 0) # for negative-going bars
apex = apex + brk_offset
gr_apex = np.array([np.amax(apex[_g]) for _g in groups])
# boolean for whether each half of a bracket is a group
is_group = [[hasattr(_b, 'append') for _b in _br] for _br in brackets]
# bracket left & right coords
brk_lr = [[group_centers[groups.index(_ix)] if _g
else bar_centers[_ix] for _ix, _g in zip(_brk, _isg)]
for _brk, _isg in zip(brackets, is_group)]
# bracket L/R midpoints (label position)
brk_c = [np.mean(_lr) for _lr in brk_lr]
# bracket bottom coords (first pass)
brk_b = [[gr_apex[groups.index(_ix)] if _g else apex[_ix]
for _ix, _g in zip(_brk, _isg)]
for _brk, _isg in zip(brackets, is_group)]
# main bracket positioning loop
brk_t = []
for _ix, (_brk, _isg) in enumerate(zip(brackets, is_group)):
# which bars does this bracket span?
spanned_bars = list(chain.from_iterable(
[_b if hasattr(_b, 'append') else [_b] for _b in _brk]))
spanned_bars = range(min(spanned_bars), max(spanned_bars) + 1)
# raise apex a bit extra if prev bracket label centered on bar
prev_label_pos = brk_c[_ix - 1] if _ix else -1
label_bar_ix = np.where(np.isclose(bar_centers, prev_label_pos))[0]
if any(np.array_equal(label_bar_ix, x) for x in _brk):
apex[label_bar_ix] += txth
elif any(_isg):
label_bar_less = np.where(bar_centers < prev_label_pos)[0]
label_bar_more = np.where(bar_centers > prev_label_pos)[0]
if len(label_bar_less) and len(label_bar_more):
apex[label_bar_less] += txth
apex[label_bar_more] += txth
gr_apex = np.array([np.amax(apex[_g]) for _g in groups])
# recalc lower tips of bracket: apex / gr_apex may have changed
brk_b[_ix] = [gr_apex[groups.index(_b)] if _g else apex[_b]
for _b, _g in zip(_brk, _isg)]
# calculate top span position
_min_t = max(apex[spanned_bars]) + brk_min_h
brk_t.append(_min_t)
# raise apex on spanned bars to account for bracket
apex[spanned_bars] = np.maximum(apex[spanned_bars],
_min_t) + brk_offset
gr_apex = np.array([np.amax(apex[_g]) for _g in groups])
# draw horz line spanning groups if desired
if bracket_group_lines:
for _brk, _isg, _blr in zip(brackets, is_group, brk_b):
for _bk, _g, _b in zip(_brk, _isg, _blr):
if _g:
_lr = [bar_centers[_ix]
for _ix in groups[groups.index(_bk)]]
_lr = (min(_lr), max(_lr))
p.plot(_lr, (_b, _b), **bracket_kwargs)
# draw (left, right, bottom-left, bottom-right, top, center, string)
for ((_l, _r), (_bl, _br), _t, _c, _s) in zip(brk_lr, brk_b, brk_t,
brk_c, bracket_text):
# bracket text
_t = float(_t) # on newer Pandas it can be shape (1,)
defaults = dict(ha='center', annotation_clip=False,
textcoords='offset points')
for k, v in defaults.items():
if k not in pval_kwargs.keys():
pval_kwargs[k] = v
if 'va' not in pval_kwargs.keys():
pval_kwargs['va'] = 'center' if bracket_inline else 'baseline'
if 'xytext' not in pval_kwargs.keys():
pval_kwargs['xytext'] = (0, 0) if bracket_inline else (0, 2)
txt = p.annotate(_s, (_c, _t), **pval_kwargs)
txt.set_bbox(dict(facecolor='w', alpha=0,
boxstyle='round, pad=0.2'))
plt.draw()
# bracket lines
lline = ((_l, _l), (_bl, _t))
rline = ((_r, _r), (_br, _t))
tline = ((_l, _r), (_t, _t))
if bracket_inline:
bb = txt.get_bbox_patch().get_window_extent()
txtw = np.diff(p.transData.inverted().transform(bb),
axis=0).ravel()[0]
_m = _c - txtw / 2.
_n = _c + txtw / 2.
tline = [((_l, _m), (_t, _t)), ((_n, _r), (_t, _t))]
else:
tline = [((_l, _r), (_t, _t))]
for x, y in [lline, rline] + tline:
p.plot(x, y, **bracket_kwargs)
# boost ymax if needed
ybnd = p.get_ybound()
if ybnd[-1] < _t + txth:
p.set_ybound(ybnd[0], _t + txth)
# annotation
box_off(p)
p.tick_params(axis='x', length=0, pad=12)
p.xaxis.set_ticks(bar_centers)
if bar_names is not None:
p.xaxis.set_ticklabels(bar_names, va='baseline')
if group_names is not None:
ymin = ylim[0] if ylim is not None else p.get_ylim()[0]
yoffset = -2.5 * rcParams['font.size']
for gn, gp in zip(group_names, group_centers):
p.annotate(gn, xy=(gp, ymin), xytext=(0, yoffset),
xycoords='data', textcoords='offset points',
ha='center', va='baseline')
# axis limits
p.set_xlim(0, bar_edges[-1] + bar_widths[-1] + gap_size / 2)
if ylim is not None:
p.set_ylim(ylim)
# output file
if fname is not None:
from os.path import splitext
fmt = splitext(fname)[-1][1:]
plt.savefig(fname, format=fmt, transparent=True)
# return handles for subplot and barplot instances
plt.draw()
return (p, b)
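# --- Hedged usage sketch (not part of the original module) ------------------
# A typical call for a (subjects x conditions) array with standard-error bars,
# two groups of two bars, and one significance bracket (fake data):
#
#     import numpy as np
#     data = np.random.randn(20, 4) + [0., 0.3, 1.0, 1.2]
#     p, b = barplot(data, axis=0, err_bars='se',
#                    groups=[[0, 1], [2, 3]],
#                    brackets=[(0, 1)], bracket_text=['n.s.'],
#                    bar_names=['A', 'B', 'C', 'D'],
#                    group_names=['control', 'treatment'])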
def box_off(ax):
"""Remove the top and right edges of a plot frame, and point ticks outward.
Parameters
----------
ax : matplotlib.axes.Axes
A matplotlib plot or subplot object.
"""
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.tick_params(axis='x', direction='out')
ax.tick_params(axis='y', direction='out')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
def plot_screen(screen, ax=None):
"""Plot a captured screenshot
Parameters
----------
screen : array
The N x M x 3 (or 4) array of screen pixel values.
ax : matplotlib Axes | None
If provided, the axes will be plotted to and cleared of ticks.
If None, a figure will be created.
Returns
-------
ax : matplotlib Axes
The axes used to plot the image.
"""
import matplotlib.pyplot as plt
screen = np.array(screen)
if screen.ndim != 3 or screen.shape[2] not in [3, 4]:
raise ValueError('screen must be a 3D array with 3 or 4 channels')
if ax is None:
plt.figure()
ax = plt.axes([0, 0, 1, 1])
ax.imshow(screen)
ax.axis('off')
return ax
| bsd-3-clause |
AndKyr/GETELEC | python/lengthtest.py | 1 | 1605 | #!/usr/bin/python
"""This plots the spectra as outputed in spectra.csv from getelec. Spectra has to be
T in the GetelecPar.in"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mb
import os
font = 30
mb.rcParams["font.family"] = "Serif"
mb.rcParams["font.size"] = font
mb.rcParams["axes.labelsize"] = font
mb.rcParams["xtick.labelsize"] = font
mb.rcParams["ytick.labelsize"] = font
mb.rcParams["legend.fontsize"] = font
mb.rcParams["lines.linewidth"] = 1.5
fig1 = plt.figure()
ax1 = fig1.gca()
ax1.set_xlabel(r"$R [nm]$")
ax1.set_ylabel(r"$L [nm]$")
ax1.set_title(r'Barrier length (L) - Radius of curvature (R)')
R = np.linspace(5.,50., 100)
L = np.copy(R)
Lex = np.copy(R)
temp = 300.
kT = 8.6173324e-5 * temp
os.system('cp in/par2.in in/GetelecPar.in')
for i in range(len(L)):
appstr = os.popen('./bin/current.exe 5. 4.5 300 %f | grep "L = " '%(R[i])).read().split()
#os.system('./bin/current.exe %f 4.5 3000.'%field)
L[i] = float(appstr[2])
os.system('cp in/par1.in in/GetelecPar.in')
for i in range(len(L)):
appstr = os.popen('./bin/current.exe 5. 4.5 300 %f | grep "L = " '%(R[i])).read().split()
#os.system('./bin/current.exe %f 4.5 3000.'%field)
Lex[i] = float(appstr[2])
appstr = os.popen('./bin/current.exe 5. 4.5 300 3000. | grep "L = " ').read().split()
Lsn = float(appstr[2])
ax1.plot(R,L,'b-', label = 'Approximate - KX')
ax1.plot(R,Lex,'r-', label = 'Numerical')
ax1.plot(np.array([R[0], R[-1]]), np.array([Lsn, Lsn]), 'k-', label = 'Approximate - SN')
ax1.legend(loc = 'best')
plt.show()
| gpl-3.0 |
jayflo/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
lfairchild/PmagPy | dialogs/thellier_gui_lib.py | 1 | 14923 | #!/usr/bin/env python
#---------------------------------------------------------------------------
# Author: Ron Shaar
# Revision notes
#
# Rev 1.0 Initial revision August 2012
# Rev 2.0 November 2014
#---------------------------------------------------------------------------
import matplotlib
import pylab,scipy
from pylab import *
from scipy import *
#import pmag
import copy
import pmagpy.pmag as pmag
from SPD import spd
def get_PI_parameters(Data, acceptance_criteria, preferences, s, tmin, tmax, GUI_log, THERMAL,MICROWAVE):
datablock = Data[s]['datablock']
pars=copy.deepcopy(Data[s]['pars']) # assignments to pars are assiging to Data[s]['pars']
Pint_pars = spd.PintPars(Data, str(s), tmin, tmax, 'magic', preferences['show_statistics_on_gui'],acceptance_criteria)
Pint_pars.reqd_stats() # calculate only statistics indicated in preferences
if not Pint_pars.pars:
print("Could not get any parameters for {}".format(Pint_pars))
return 0
pars.update(Pint_pars.pars)
t_Arai=Data[s]['t_Arai']
x_Arai=Data[s]['x_Arai']
y_Arai=Data[s]['y_Arai']
x_tail_check=Data[s]['x_tail_check']
y_tail_check=Data[s]['y_tail_check']
zijdblock=Data[s]['zijdblock']
z_temperatures=Data[s]['z_temp']
#print tmin,tmax,z_temperatures
# check tmin
if tmin not in t_Arai or tmin not in z_temperatures:
return(pars)
# check tmax
    if tmax not in t_Arai or tmax not in z_temperatures:
return(pars)
start=t_Arai.index(tmin)
end=t_Arai.index(tmax)
zstart=z_temperatures.index(tmin)
zend=z_temperatures.index(tmax)
zdata_segment=Data[s]['zdata'][zstart:zend+1]
# replacing PCA for zdata and for ptrms here
## removed a bunch of Ron's commented out old code
#-------------------------------------------------
# York regresssion (York, 1967) following Coe (1978)
# calculate f,fvds,
# modified from pmag.py
#-------------------------------------------------
x_Arai_segment = x_Arai[start:end+1]
y_Arai_segment = y_Arai[start:end+1]
# replace thellier_gui code for york regression here
pars["specimen_int"]=-1*pars['lab_dc_field']*pars["specimen_b"]
# replace thellier_gui code for ptrm checks, DRAT etc. here
# also tail checks and SCAT
#-------------------------------------------------
# Add missing parts of code from old get_PI
#-------------------------------------------------
if MICROWAVE==True:
LP_code="LP-PI-M"
else:
LP_code="LP-PI-TRM"
count_IZ = Data[s]['steps_Arai'].count('IZ')
count_ZI = Data[s]['steps_Arai'].count('ZI')
if count_IZ > 1 and count_ZI > 1:
pars['magic_method_codes']=LP_code+":"+"LP-PI-BT-IZZI"
elif count_IZ < 1 and count_ZI > 1:
pars['magic_method_codes']=LP_code+":"+"LP-PI-ZI"
elif count_IZ > 1 and count_ZI < 1:
pars['magic_method_codes']=LP_code+":"+"LP-PI-IZ"
else:
pars['magic_method_codes']=LP_code
if 'ptrm_checks_temperatures' in list(Data[s].keys()) and len(Data[s]['ptrm_checks_temperatures'])>0:
if MICROWAVE==True:
pars['magic_method_codes']+=":LP-PI-ALT-PMRM"
else:
pars['magic_method_codes']+=":LP-PI-ALT-PTRM"
if 'tail_check_temperatures' in list(Data[s].keys()) and len(Data[s]['tail_check_temperatures'])>0:
pars['magic_method_codes']+=":LP-PI-BT-MD"
if 'additivity_check_temperatures' in list(Data[s].keys()) and len(Data[s]['additivity_check_temperatures'])>0:
pars['magic_method_codes']+=":LP-PI-BT"
#-------------------------------------------------
# Calculate anisotropy correction factor
#-------------------------------------------------
if "AniSpec" in list(Data[s].keys()):
pars["AC_WARNING"]=""
        # if both AARM and ATRM tensors exist, try the AARM first; if it fails, use the ATRM.
if 'AARM' in list(Data[s]["AniSpec"].keys()) and 'ATRM' in list(Data[s]["AniSpec"].keys()):
TYPES=['AARM','ATRM']
else:
TYPES=list(Data[s]["AniSpec"].keys())
for TYPE in TYPES:
red_flag=False
S_matrix=zeros((3,3),'f')
S_matrix[0,0]=Data[s]['AniSpec'][TYPE]['anisotropy_s1']
S_matrix[1,1]=Data[s]['AniSpec'][TYPE]['anisotropy_s2']
S_matrix[2,2]=Data[s]['AniSpec'][TYPE]['anisotropy_s3']
S_matrix[0,1]=Data[s]['AniSpec'][TYPE]['anisotropy_s4']
S_matrix[1,0]=Data[s]['AniSpec'][TYPE]['anisotropy_s4']
S_matrix[1,2]=Data[s]['AniSpec'][TYPE]['anisotropy_s5']
S_matrix[2,1]=Data[s]['AniSpec'][TYPE]['anisotropy_s5']
S_matrix[0,2]=Data[s]['AniSpec'][TYPE]['anisotropy_s6']
S_matrix[2,0]=Data[s]['AniSpec'][TYPE]['anisotropy_s6']
#Data[s]['AniSpec']['anisotropy_type']=Data[s]['AniSpec']['anisotropy_type']
Data[s]['AniSpec'][TYPE]['anisotropy_n']=int(float(Data[s]['AniSpec'][TYPE]['anisotropy_n']))
this_specimen_f_type=Data[s]['AniSpec'][TYPE]['anisotropy_type']+"_"+"%i"%(int(Data[s]['AniSpec'][TYPE]['anisotropy_n']))
Ftest_crit={}
Ftest_crit['ATRM_6']= 3.1059
Ftest_crit['AARM_6']= 3.1059
Ftest_crit['AARM_9']= 2.6848
Ftest_crit['AARM_15']= 2.4558
# threshold value for Ftest:
if 'AniSpec' in list(Data[s].keys()) and TYPE in list(Data[s]['AniSpec'].keys())\
and 'anisotropy_sigma' in list(Data[s]['AniSpec'][TYPE].keys()) \
and Data[s]['AniSpec'][TYPE]['anisotropy_sigma']!="":
                # Calculate the F-test statistic; if it falls below the critical value (anisotropy not significant), the tensor is reset to the identity matrix below
sigma=float(Data[s]['AniSpec'][TYPE]['anisotropy_sigma'])
nf = 3*int(Data[s]['AniSpec'][TYPE]['anisotropy_n'])-6
F=calculate_ftest(S_matrix,sigma,nf)
#print s,"F",F
Data[s]['AniSpec'][TYPE]['ftest']=F
#print "s,sigma,nf,F,Ftest_crit[this_specimen_f_type]"
#print s,sigma,nf,F,Ftest_crit[this_specimen_f_type]
if acceptance_criteria['specimen_aniso_ftest_flag']['value'] in ['g','1',1,True,'TRUE','True'] :
Ftest_threshold=Ftest_crit[this_specimen_f_type]
if Data[s]['AniSpec'][TYPE]['ftest'] < Ftest_crit[this_specimen_f_type]:
S_matrix=identity(3,'f')
pars["AC_WARNING"]=pars["AC_WARNING"]+"%s tensor fails F-test; "%(TYPE)
red_flag=True
else:
Data[s]['AniSpec'][TYPE]['anisotropy_sigma']=""
Data[s]['AniSpec'][TYPE]['ftest']=99999
if 'anisotropy_alt' in list(Data[s]['AniSpec'][TYPE].keys()) and Data[s]['AniSpec'][TYPE]['anisotropy_alt']!="":
if acceptance_criteria['anisotropy_alt']['value'] != -999 and \
(float(Data[s]['AniSpec'][TYPE]['anisotropy_alt']) > float(acceptance_criteria['anisotropy_alt']['value'])):
S_matrix=identity(3,'f')
pars["AC_WARNING"]=pars["AC_WARNING"]+"%s tensor fails alteration check: %.1f > %.1f; "%(TYPE,float(Data[s]['AniSpec'][TYPE]['anisotropy_alt']),float(acceptance_criteria['anisotropy_alt']['value']))
red_flag=True
else:
Data[s]['AniSpec'][TYPE]['anisotropy_alt']=""
Data[s]['AniSpec'][TYPE]['S_matrix']=S_matrix
#--------------------------
# if AARM passes all, use the AARM.
# if ATRM fail alteration use the AARM
# if both fail F-test: use AARM
#--------------------------
if len(TYPES)>1:
if "ATRM tensor fails alteration check" in pars["AC_WARNING"]:
TYPE='AARM'
elif "ATRM tensor fails F-test" in pars["AC_WARNING"]:
TYPE='AARM'
else:
                TYPE='AARM'
S_matrix= Data[s]['AniSpec'][TYPE]['S_matrix']
#---------------------------
TRM_anc_unit=array(pars['specimen_PCA_v1'])/sqrt(pars['specimen_PCA_v1'][0]**2+pars['specimen_PCA_v1'][1]**2+pars['specimen_PCA_v1'][2]**2)
B_lab_unit=pmag.dir2cart([ Data[s]['Thellier_dc_field_phi'], Data[s]['Thellier_dc_field_theta'],1])
#B_lab_unit=array([0,0,-1])
Anisotropy_correction_factor=linalg.norm(dot(inv(S_matrix),TRM_anc_unit.transpose()))*norm(dot(S_matrix,B_lab_unit))
pars["Anisotropy_correction_factor"]=Anisotropy_correction_factor
pars["AC_specimen_int"]= pars["Anisotropy_correction_factor"] * float(pars["specimen_int"])
pars["AC_anisotropy_type"]=Data[s]['AniSpec'][TYPE]["anisotropy_type"]
pars["specimen_int_uT"]=float(pars["AC_specimen_int"])*1e6
if TYPE=='AARM':
if ":LP-AN-ARM" not in pars['magic_method_codes']:
pars['magic_method_codes']+=":LP-AN-ARM:AE-H:DA-AC-AARM"
pars['specimen_correction']='c'
pars['specimen_int_corr_anisotropy']=Anisotropy_correction_factor
if TYPE=='ATRM':
if ":LP-AN-TRM" not in pars['magic_method_codes']:
pars['magic_method_codes']+=":LP-AN-TRM:AE-H:DA-AC-ATRM"
pars['specimen_correction']='c'
pars['specimen_int_corr_anisotropy']=Anisotropy_correction_factor
else:
pars["Anisotropy_correction_factor"]=1.0
pars["specimen_int_uT"]=float(pars["specimen_int"])*1e6
pars["AC_WARNING"]="No anistropy correction"
pars['specimen_correction']='u'
pars["specimen_int_corr_anisotropy"]=pars["Anisotropy_correction_factor"]
#-------------------------------------------------
# NLT and anisotropy correction together in one equation
# See Shaar et al (2010), Equation (3)
#-------------------------------------------------
if 'NLT_parameters' in list(Data[s].keys()):
alpha=Data[s]['NLT_parameters']['tanh_parameters'][0][0]
beta=Data[s]['NLT_parameters']['tanh_parameters'][0][1]
b=float(pars["specimen_b"])
Fa=pars["Anisotropy_correction_factor"]
if ((abs(b)*Fa)/alpha) <1.0:
Banc_NLT=math.atanh(((abs(b)*Fa)/alpha))/beta
pars["NLTC_specimen_int"]=Banc_NLT
pars["specimen_int_uT"]=Banc_NLT*1e6
if "AC_specimen_int" in list(pars.keys()):
pars["NLT_specimen_correction_factor"]=Banc_NLT/float(pars["AC_specimen_int"])
else:
pars["NLT_specimen_correction_factor"]=Banc_NLT/float(pars["specimen_int"])
if ":LP-TRM" not in pars['magic_method_codes']:
pars['magic_method_codes']+=":LP-TRM:DA-NL"
pars['specimen_correction']='c'
else:
GUI_log.write ("-W- WARNING: problematic NLT mesurements for specimens %s. Cant do NLT calculation. check data\n"%s)
pars["NLT_specimen_correction_factor"]=-1
else:
pars["NLT_specimen_correction_factor"]=-1
#-------------------------------------------------
# Calculate the final result with cooling rate correction
#-------------------------------------------------
pars["specimen_int_corr_cooling_rate"]=-999
if 'cooling_rate_data' in list(Data[s].keys()):
if 'CR_correction_factor' in list(Data[s]['cooling_rate_data'].keys()):
if Data[s]['cooling_rate_data']['CR_correction_factor'] != -1 and Data[s]['cooling_rate_data']['CR_correction_factor'] !=-999:
pars["specimen_int_corr_cooling_rate"]=Data[s]['cooling_rate_data']['CR_correction_factor']
pars['specimen_correction']='c'
pars["specimen_int_uT"]=pars["specimen_int_uT"]*pars["specimen_int_corr_cooling_rate"]
if ":DA-CR" not in pars['magic_method_codes']:
pars['magic_method_codes']+=":DA-CR"
if 'CR_correction_factor_flag' in list(Data[s]['cooling_rate_data'].keys()):
if Data[s]['cooling_rate_data']['CR_correction_factor_flag']=="calculated":
pars['CR_flag']="calculated"
else:
pars['CR_flag']=""
if 'CR_correction_factor_flag' in list(Data[s]['cooling_rate_data'].keys()) \
and Data[s]['cooling_rate_data']['CR_correction_factor_flag']!="calculated":
pars["CR_WARNING"]="inferred cooling rate correction"
else:
pars["CR_WARNING"]="no cooling rate correction"
def combine_dictionaries(d1, d2):
"""
        merges the entries of d2 into d1 (in place) and returns d1.
        if d1 and d2 share a key, the existing value from d1 is kept.
"""
for key, value in d2.items():
if key not in list(d1.keys()):
d1[key] = value
return d1
Data[s]['pars'] = pars
#print pars.keys()
return(pars)
def calculate_ftest(s,sigma,nf):
chibar=(s[0][0]+s[1][1]+s[2][2])/3.
t=array(linalg.eigvals(s))
F=0.4*(t[0]**2+t[1]**2+t[2]**2 - 3*chibar**2)/(float(sigma)**2)
return(F)
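# Worked example (added note, not part of the original module): for a perfectly
# isotropic tensor the eigenvalues all equal the bulk value, the numerator
# vanishes and F = 0:
#
#     s_iso = identity(3, 'f')
#     calculate_ftest(s_iso, sigma=0.01, nf=12)   # -> 0.0
#
# Larger F means the anisotropy is more significant relative to the fitting
# error sigma; in get_PI_parameters above, F is compared against the critical
# values in Ftest_crit.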
def check_specimen_PI_criteria(pars,acceptance_criteria):
'''
# Check if specimen pass Acceptance criteria
'''
#if 'pars' not in self.Data[specimen].kes():
# return
pars['specimen_fail_criteria']=[]
for crit in list(acceptance_criteria.keys()):
if crit not in list(pars.keys()):
continue
if acceptance_criteria[crit]['value']==-999:
continue
if acceptance_criteria[crit]['category']!='IE-SPEC':
continue
cutoff_value=acceptance_criteria[crit]['value']
if crit=='specimen_scat':
if pars["specimen_scat"] in ["Fail",'b',0,'0','FALSE',"False",False,"f"]:
pars['specimen_fail_criteria'].append('specimen_scat')
elif crit=='specimen_k' or crit=='specimen_k_prime':
if abs(pars[crit])>cutoff_value:
pars['specimen_fail_criteria'].append(crit)
# high threshold value:
elif acceptance_criteria[crit]['threshold_type']=="high":
if pars[crit]>cutoff_value:
pars['specimen_fail_criteria'].append(crit)
elif acceptance_criteria[crit]['threshold_type']=="low":
if pars[crit]<cutoff_value:
pars['specimen_fail_criteria'].append(crit)
return pars
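# --- Hedged usage sketch (not part of the original module) ------------------
# check_specimen_PI_criteria() expects acceptance_criteria entries shaped like
# those used by the Thellier GUI; a minimal hand-rolled illustration (names and
# numbers below are made up):
#
#     acceptance_criteria = {
#         'specimen_frac': {'value': 0.8, 'category': 'IE-SPEC',
#                           'threshold_type': 'low'},
#     }
#     pars = {'specimen_frac': 0.65}
#     pars = check_specimen_PI_criteria(pars, acceptance_criteria)
#     # pars['specimen_fail_criteria'] == ['specimen_frac']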
def get_site_from_hierarchy(sample,Data_hierarchy):
site=""
sites=list(Data_hierarchy['sites'].keys())
for S in sites:
if sample in Data_hierarchy['sites'][S]:
site=S
break
return(site)
def get_location_from_hierarchy(site,Data_hierarchy):
location=""
locations=list(Data_hierarchy['locations'].keys())
for L in locations:
if site in Data_hierarchy['locations'][L]:
location=L
break
return(location)
| bsd-3-clause |
clemkoa/scikit-learn | sklearn/neural_network/rbm.py | 26 | 12280 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from scipy.special import expit # logistic function
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
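    # (Added note, not in the original source: for binary hidden units the sum
    #  over h can be done analytically, giving
    #      F(v) = -intercept_visible . v - sum_j log(1 + exp(W_j . v + intercept_hidden_j)),
    #  which is what the expression above computes via np.logaddexp.)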
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
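    # Hedged usage sketch (not part of the original class): after fitting,
    # repeated calls to gibbs() draw "fantasy" samples from the model, e.g.
    #
    #     rbm = BernoulliRBM(n_components=2, n_iter=20, random_state=0).fit(X)
    #     v = X[:1].copy()
    #     for _ in range(1000):
    #         v = rbm.gibbs(v)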
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='F')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
        self._fit(X, self.random_state_)
        return self
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
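    # Illustrative usage sketch (hypothetical data): the pseudo-likelihood is
    # a per-sample proxy for the log-likelihood; larger (less negative) values
    # mean the model finds the samples more plausible.
    #
    #   >>> import numpy as np
    #   >>> X = np.array([[0., 1., 1.], [1., 0., 1.]])
    #   >>> rbm = BernoulliRBM(n_components=2, random_state=0).fit(X)
    #   >>> rbm.score_samples(X).shape
    #   (2,)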
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='F')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
| bsd-3-clause |
ryfeus/lambda-packs | HDF4_H5_NETCDF/source2.7/numpy/lib/twodim_base.py | 7 | 26111 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
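# Illustrative sketch: _min_int picks the narrowest signed integer dtype able
# to represent the closed range [low, high], falling back to int64.
#
#   >>> _min_int(0, 100) is int8
#   True
#   >>> _min_int(0, 40000) is int32
#   True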
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
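# Sketch of the mask construction above (illustrative only):
# greater_equal.outer compares every row index against every shifted column
# index, so entry (i, j) is True exactly when j <= i + k.
#
#   >>> np.tri(3, 3, k=0, dtype=int)
#   array([[1, 0, 0],
#          [1, 1, 0],
#          [1, 1, 1]])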
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
    a matrix with a geometric progression in each row is named for
    Alexandre-Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
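# Sketch of the cumulative-product trick used above (illustrative only): after
# the first power column is filled with ``x``, multiply.accumulate turns the
# remaining columns into x**1, x**2, ..., matching the brute-force
# construction shown in the docstring.
#
#   >>> x = np.array([1, 2, 3])
#   >>> np.array_equal(np.vander(x, 3, increasing=True),
#   ...                np.column_stack([x**i for i in range(3)]))
#   True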
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_area``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
>>> plt.imshow(H, interpolation='nearest', origin='low',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return nonzero(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return nonzero(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return nonzero(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
| mit |
fzalkow/scikit-learn | sklearn/cross_validation.py | 96 | 58309 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
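# Illustrative sketch (hypothetical subclass name): a concrete iterator only
# needs to provide _iter_test_indices (or _iter_test_masks); the base class
# turns each test set into a (train_index, test_index) pair.
#
#   >>> class _FirstSampleOut(_PartitionIterator):
#   ...     def _iter_test_indices(self):
#   ...         yield np.array([0])
#   >>> [(tr.tolist(), te.tolist()) for tr, te in _FirstSampleOut(3)]
#   [([1, 2], [0])]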
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
    Due to the high number of iterations, which grows combinatorially with the
    number of samples, this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
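    # Illustrative note on the fold sizes computed above: with n=10 and
    # n_folds=3 the folds get sizes [4, 3, 3], i.e. the first n % n_folds
    # folds receive one extra sample, matching the Notes in the class
    # docstring.
    #
    #   >>> [len(test) for _, test in KFold(10, n_folds=3)]
    #   [4, 3, 3]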
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one has the
    complementary size.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
    all assigned the same label.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
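# Illustrative sketch (hypothetical values): with a float test_size the helper
# rounds the test count up and, when train_size is None, assigns the remaining
# samples to the training set.
#
#   >>> _validate_shuffle_split(10, test_size=0.25, train_size=None)
#   (7, 3)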
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
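# Illustrative sketch (hypothetical values): array-like fit parameters with
# one entry per sample (e.g. sample weights) are sliced to the given indices,
# while scalars pass through untouched.
#
#   >>> X = np.zeros((4, 2))
#   >>> _index_param_value(X, np.array([1., 2., 3., 4.]), np.array([0, 2]))
#   array([ 1.,  3.])
#   >>> _index_param_value(X, 5, np.array([0, 2]))
#   5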
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
This generator must include all elements in the test set exactly once.
Otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
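# Illustrative usage sketch (hypothetical data): cross_val_predict returns one
# out-of-fold prediction per sample, aligned with the input order.
#
#   >>> from sklearn.linear_model import LogisticRegression
#   >>> X = np.array([[0.], [1.], [2.], [3.]])
#   >>> y = np.array([0, 0, 1, 1])
#   >>> cross_val_predict(LogisticRegression(), X, y, cv=2).shape
#   (4,)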
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
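# Editor's note (illustrative): a quick self-check of the helper above.  A
# permutation of arange(n) is accepted; repeated or missing indices are not.
def _example_check_is_partition():
    import numpy as np
    assert _check_is_partition(np.array([2, 0, 1]), 3)      # valid partition
    assert not _check_is_partition(np.array([0, 0, 2]), 3)  # index 1 missing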
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
        The data to fit. Can be, for example, a list or an array of at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
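# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module): 5-fold stratified accuracy of a linear SVM on iris.  The helper
# name is hypothetical; exact scores depend on the scikit-learn version.
def _example_cross_val_score():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    scores = cross_val_score(SVC(kernel='linear'), iris.data, iris.target,
                             cv=5, scoring='accuracy')
    return scores.mean(), scores.std()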
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
        None, in which case 3-fold cross-validation is used, or another
        object that will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
    classifier : boolean, optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
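# Editor's sketch (illustrative, not part of the original module): how the
# integer shortcut above resolves.  With a classifier and a discrete target
# the int becomes a StratifiedKFold, otherwise a plain KFold.
def _example_check_cv():
    import numpy as np
    from sklearn.svm import SVC, SVR
    X = np.zeros((6, 2))
    y = np.array([0, 1, 0, 1, 0, 1])
    cv_clf = check_cv(3, X, y, classifier=is_classifier(SVC()))  # StratifiedKFold
    cv_reg = check_cv(3, X, y, classifier=is_classifier(SVR()))  # KFold
    return cv_clf, cv_reg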
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
        Labels constrain the permutation to samples sharing the same label:
        ``y`` values are permuted only within each label group.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
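# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original
# module): permutation test on iris.  With n_permutations=100 the smallest
# attainable p-value is 1/101, so a well separated dataset typically reports
# a p-value close to 0.01.  The helper name is hypothetical.
def _example_permutation_test_score():
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC
    iris = load_iris()
    score, perm_scores, pvalue = permutation_test_score(
        SVC(kernel='linear'), iris.data, iris.target,
        cv=5, n_permutations=100, scoring='accuracy')
    return score, pvalue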
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
stratify = options.pop('stratify', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
| bsd-3-clause |
quinngroup/fergus-ssl | Extras/src/LabelPropagationDistributed.py | 1 | 15949 | # coding: utf-8
import numpy as np
from numpy.core.numeric import array
from sklearn.metrics import pairwise
from collections import OrderedDict
from scipy.linalg import eig
from scipy import interpolate as ip
from pyspark.mllib.linalg.distributed import IndexedRow, RowMatrix, IndexedRowMatrix, CoordinateMatrix, MatrixEntry
from pyspark.mllib.linalg import DenseVector, Vectors
from pyspark.mllib.feature import PCA as PCAmllib
from pyspark.sql.types import *
from pyspark.mllib.clustering import GaussianMixture
class LabelPropagationDistributed():
"""
A Label Propagation semi-supervised clustering algorithm based
on the paper "Semi-supervised Learning in Gigantic Image Collections"
by Fergus, Weiss and Torralba with modifications to fit Spark.
The algorithm begins with discretizing the datapoints into 1-D histograms.
For each independent dimension, it approximates the density using the histogram
and solves numerically for eigenfunctions and eigenvalues.
Then it uses the k eigenfunctions for k smallest eigenvalues to do a 1-D
interpolation of every data point.
These interpolated data points are the approximated eigenvectors of the Normalized
Laplacian of the original data points which are further used to solve a k*k system
    of linear equations, which yields alpha.
The alpha is then used to get approximate functions which are then clustered using
Gaussian Mixture Model.
For any new/ unseen data point, the point can be interpolated and using the alpha,
the approximate function for that point can be obtained whose label can be easily predicted
using the GMM model learned before making it inductive learning.
Based on
U{https://cs.nyu.edu/~fergus/papers/fwt_ssl.pdf}
Fergus, Weiss and Torralba, Semi-supervised Learning in Gigantic
Image Collections, Proceedings of the 22nd International Conference
on Neural Information Processing Systems, p.522-530, December 07-10, 2009,
Vancouver, British Columbia, Canada
>>> from LabelPropagationDistributed import LabelPropagationDistributed as LPD
>>> dataX = array([ 5.76961775, -6.55673209, 11.30752027, -1.56316985,
8.76722337, -1.54995049, 10.23511359, -6.20912033,
3.49161828, -3.02917744]).reshape(5,2)
>>> dataY = array([ 1, -1, 0, -1, -1])
>>> test = array([2.1159109 , 6.03520684, 1.04347698, -4.44740207,
-8.33902404, 4.20918959, 1.38447488, -1.50363493]).reshape(4,2)
>>> lpd = LPD(sc=sc, sqlContext = sqlContext, numBins = 5)
>>> lpd.fit(sc.parallelize(dataX),sc.parallelize(dataY))
>>> plabels_ = lpd.predict(sc.parallelize(test))
"""
global transformer
global selectValues
global bc_EdgeMeans
global bc_newg
global kb
def __init__(self, sc = None, sqlContext = None, k = -1,numBins = -1 ,lagrangian = 10):
self.k = k
self.numBins = numBins
self.lagrangian = lagrangian
self.sqlContext = sqlContext
self.sc = sc
def makeDF(self,rotatedData, dimensions):
"""
Convert data from RDD to a dataframe with every feature
as a different column.
:param rotatedData:
Data points with as independent dimensions as possible
:param dimensions
Total number of dimensions of the data
"""
X_ = rotatedData.map(lambda vec: np.array(vec))
dataAsDict = X_.map(lambda x: tuple(float(f) for f in x))
schemaString = ""
for i in range(dimensions):
i = i+1
schemaString += str(i) + " "
schemaString = schemaString.strip()
fields = [StructField(field_name,FloatType(), True) for field_name in schemaString.split()]
schema = StructType(fields)
return self.sqlContext.createDataFrame(dataAsDict, schema)
def getdataboundaries(self,dictData,k):
"""
For interpolating the data points, get min and max values for
the data set.
:param: dictData:
Dataframe of dataset
:param: k:
The number of eigenvectors to be selected. Default is equal
to the number of clusters.
"""
dataBounds = OrderedDict()
for i in range(0,k):
s = str(i+1)
tmprdd = dictData.select(s).rdd.map(lambda row: row.asDict().values()[0])
dataBounds[i] = (tmprdd.min(),tmprdd.max())
return dataBounds
def transformer(vec, bounds, bc_EdgeMeans, bc_newg):
"""
Interpolate and return data points
Runs on workers
:param: vec:
k dimenional data
:param: bounds:
Min and Max values for data to be interpolated.
:param: bc_EdgeMeans:
Histogram bin edges to build the 1-D interpolator
:param: bc_newg:
k smallest eigenvectors to build the 1-D interpolator
"""
vec1 = vec.toArray()
tmpArr = np.zeros(vec1.shape)
edge_means = bc_EdgeMeans.value
for i in range(len(vec1)):
inter2 = ip.interp1d(edge_means[:,i], bc_newg.value[:,i])
(minVal,maxVal) = bounds.get(i)
if (minVal < edge_means[:,i].min()) or (maxVal > edge_means[:,i].max()):
val = (((edge_means[:,i].max()-edge_means[:,i].min())*(vec1[i] - minVal))/(maxVal - minVal)) + edge_means[:,i].min()
if vec1[i]==minVal:
val = val + 0.001
if vec1[i]==maxVal:
val = val - 0.001
else:
val = vec1[i]
tmpArr[i] = inter2(val)
return DenseVector(tmpArr)
@staticmethod
def indRowMatMaker(irm):
"""
Converts RDD to indexed Row Matrix
:param: irm:
RDD to be converted to IndexedRowMatrix
"""
return IndexedRowMatrix(irm.zipWithIndex().map(lambda x:IndexedRow(x[1],x[0])))
def solver(self,functions):
"""
f = U * alpha
Get and return approximate eigenvectors from eigenfunctions using alpha.
        :param: functions:
Approximate eigenfunctions
"""
U = functions
alpha_bc = self.sc.broadcast(self.alpha.reshape(self.k,1))
f = U.map(lambda vec: np.dot(vec.toArray(), alpha_bc.value))
return f
def selectValues(ddict,kb):
"""
Select and return k eigenvectors from the Dataframe.
:param: ddict:
Dataframe Row as a dictionary.
:param: kb:
Desired number of eigenvectors to be selected.
Default equals to number of clusters
"""
desired_keys = sorted([int(k) for k in ddict.keys()])[0:kb.value]
newddict = { i: ddict[str(i)] for i in desired_keys }
return newddict
def relabel(self,labels):
"""
Label the data points in an ordered way depending on the ascending
order of the gaussian means.
:param: labels:
GMM predicted labels
"""
gaussians = np.zeros((self.k,1))
i=0
for ls in self.gmm.gaussians:
gaussians[i] = (ls.mu)
i = i +1
distmeans = self.sc.broadcast(np.argsort(gaussians.flatten()))
return labels.map(lambda x: np.where(distmeans.value == x)[0][0])
def _get_kernel(self, X, y = None,ker=None):
"""
return pairwise affinity matrix based on the kernel specified.
:param: X:
Data array
:param: y:
Label array
:param: ker:
kernel specified. Currently supporting Euclidean and rbf.
"""
if ker == "rbf":
if y is None:
return pairwise.rbf_kernel(X, X, gamma = 625)
else:
return pairwise.rbf_kernel(X, y, gamma = 625)
elif ker == "linear":
if y is None:
return pairwise.euclidean_distances(X, X)
else:
return pairwise.euclidean_distances(X, y)
else:
raise ValueError("is not a valid kernel. Only rbf and euclidean"
" are supported at this time")
def getParams(self, X, y ):
"""
Get all necessary parameters like total number of dimensions,
desired number of dimensions to work on k,
number of classes based on the labeled data available,
number of histogram bins and total data points n.
:param: X
RDD of Data points
:param: y
RDD of labels
"""
n = X.cache().count()
self.dimensions= X.zipWithIndex().filter(lambda (ls,i): i==0).map(lambda (row,i): len(row)).collect()[0]
classes = sorted(y.map(lambda x: (x,1)).reduceByKey(lambda a,b: a+b).map(lambda (x,arr): x).collect())
if classes[0] == -1:
classes = np.delete(classes, 0) # remove the -1 from this list
if self.k == -1:
self.k = np.size(classes)
if self.numBins == -1:
self.numBins = self.k + 1
if self.k > self.dimensions:
raise ValueError("k cannot be more than the number of features")
return (n,classes)
def rotate(self, X):
"""
Rotate the data to get independent dimensions.
:param: X
RDD of data points to be rotated
"""
XforPCA = X.map(lambda rw: Vectors.dense(rw))
self.PCA = PCAmllib(self.dimensions).fit(XforPCA)
rotatedData = self.PCA.transform(XforPCA)
return rotatedData
def approximateDensities(self, s, dictData):
"""
Discretize data into bins. Returns the new histograms and corresponding bin edges.
:param: s:
index to select dimension of the data
:param: dictData:
Dataframe of original data
"""
dimRdd = dictData.select(s)
binEdges,histograms = dimRdd.rdd.map(lambda x: x.asDict().values()[0]).histogram(self.numBins)
histograms = array(histograms)
binEdges = np.array(binEdges)
db = array(np.diff(binEdges),float)
histograms = histograms/db/histograms.sum()
histograms = histograms + 0.01
histograms /= histograms.sum()
return (histograms, binEdges)
def generalizedEigenSolver(self, histograms):
"""
A generalized Eigen Solver that gives approximate eigenfunctions and eigenvalues.
Based on Eqn. 2 in the paper.
:params: histograms:
Discretized data whose eigenfunctions and eigenvalues are to be evaluated.
"""
Wdis = self._get_kernel(histograms.reshape(histograms.shape[0],1),y=None,ker="linear")
P = np.diag(histograms)
Ddis = np.diag(np.sum((P.dot(Wdis.dot(P))),axis=0))
Dhat = np.diag(np.sum(P.dot(Wdis),axis=0))
sigmaVals, functions = eig((Ddis-(P.dot(Wdis.dot(P)))),(P.dot(Dhat)))
arg = np.argsort(np.real(sigmaVals))[1]
return (np.real(sigmaVals)[arg], np.real(functions)[:,arg])
def getKSmallest(self, dictData):
"""
Order and select k eigenvectors from k smallest eigenvalues.
:param: dictData:
Dataframe of data points
"""
sig = np.zeros(self.dimensions)
gee = np.zeros((self.numBins,self.dimensions))
b_edgeMeans = np.zeros((self.numBins,self.dimensions))
for i in range(self.dimensions):
s = str(i+1)
histograms, binEdges = self.approximateDensities(s, dictData)
b_edgeMeans[:,i] = np.array([binEdges[j:j + 2].mean() for j in range(binEdges.shape[0] - 1)])
sig[i], gee[:,i] = self.generalizedEigenSolver(histograms)
if np.isnan(np.min(sig)):
nan_num = np.isnan(sig)
sig[nan_num] = 0
ind = np.argsort(sig)[0:self.k]
return (sig[ind],gee[:,ind], b_edgeMeans[:,ind])
def broadcaster(self):
"""
Function to broadcast parameters that will be used on workers
"""
bc_EdgeMeans = self.sc.broadcast(self.newEdgeMeans)
bc_newg = self.sc.broadcast(self.newg)
kb = self.sc.broadcast(self.k)
return (bc_EdgeMeans, bc_newg, kb)
def getAlpha(self, approxValues, y, n, newsig):
"""
Using the approximate eigenfunctions, solve Eqn 1 in the paper and
solve it for alpha.
:params: approxValues:
Approximated eigenfunctions
:params: y:
RDD of label array
:params: n:
Size of data
:params: newsig:
k smallest eigenvalues
"""
U = LabelPropagationDistributed.indRowMatMaker(approxValues)
labeled_ind = np.array(y.zipWithIndex().filter(lambda (a,b): a!=-1).map(lambda (a,b): b).collect())
matent = []
for i in labeled_ind:
matent.append(MatrixEntry(i,i,self.lagrangian))
V = CoordinateMatrix(self.sc.parallelize(matent),numRows=n, numCols=n)
Utrans = U.toCoordinateMatrix().transpose()
Ublk = U.toBlockMatrix()
product1 = Utrans.toBlockMatrix().multiply(V.toBlockMatrix())
product2 = product1.multiply(Ublk)
S = np.diag(newsig)
localblk = product2.toLocalMatrix().toArray()
A = S + localblk
if np.linalg.det(A) == 0:
A = A + np.eye(A.shape[1])*0.000001
yblk = CoordinateMatrix(y.zipWithIndex().map(lambda x: MatrixEntry(x[1],0,x[0]))).toBlockMatrix()
b = product1.multiply(yblk).toLocalMatrix().toArray()
alpha = np.linalg.solve(A, b)
return alpha
def fit(self,X,y):
"""
A fit function that returns a label propagation semi-supervised clustering model.
:params X:
RDD of data points
:params: y:
RDD of labels
"""
if y is None:
raise ValueError("y cannot be None")
n,classes = self.getParams(X, y)
rotatedData = self.rotate(X)
dictData = self.makeDF(rotatedData, self.dimensions)
newsig,self.newg,self.newEdgeMeans = self.getKSmallest(dictData)
bc_EdgeMeans, bc_newg, kb = self.broadcaster()
dataBounds = self.getdataboundaries(dictData, self.k)
makeItMatrix = RowMatrix(dictData.rdd.map(lambda row: selectValues(row.asDict(), kb).values()))
approxValues = makeItMatrix.rows.map(lambda rw: transformer(rw, dataBounds, bc_EdgeMeans, bc_newg))
self.alpha = self.getAlpha(approxValues, y, n, newsig)
efunctions = self.solver(approxValues)
self.gmm = GaussianMixture.train(efunctions, np.size(classes), convergenceTol=0.0001,
maxIterations=5000, seed=None)
labels_ = self.gmm.predict(efunctions)
self.labels_ = self.relabel(labels_)
return self
def predict(self, X,y=None):
"""
Interpolate data and get approximate eigenvectors using alpha.
Predict labels for the eigenvectors using GMM.
:params: X:
RDD of data points whose label is to be predicted
:params: y:
Ground Truth for data points. RDD of Label array
"""
bc_EdgeMeans, bc_newg, kb = self.broadcaster()
testXforPCA = X.map(lambda rw: Vectors.dense(rw))
newX = self.PCA.transform(testXforPCA)
testdf = self.makeDF(newX, self.dimensions)
testdatabounds = self.getdataboundaries(testdf, self.k)
testmakeItMatrix = RowMatrix(testdf.rdd.map(lambda row: selectValues(row.asDict(), kb).values()))
testapproxValues = testmakeItMatrix.rows.map(lambda rw: transformer(rw, testdatabounds,bc_EdgeMeans, bc_newg))
testfunctions = self.solver(testapproxValues)
predictedlabels = self.relabel(self.gmm.predict(testfunctions))
return predictedlabels
| apache-2.0 |
AustereCuriosity/astropy | astropy/visualization/wcsaxes/axislabels.py | 4 | 4505 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function, division, absolute_import
import numpy as np
from matplotlib.text import Text
import matplotlib.transforms as mtransforms
from .frame import RectangularFrame
class AxisLabels(Text):
def __init__(self, frame, minpad=1, *args, **kwargs):
self._frame = frame
super(AxisLabels, self).__init__(*args, **kwargs)
self.set_clip_on(True)
self.set_visible_axes('all')
self.set_ha('center')
self.set_va('center')
self._minpad = minpad
def get_minpad(self, axis):
try:
return self._minpad[axis]
except TypeError:
return self._minpad
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self._frame.keys()
else:
return [x for x in self._visible_axes if x in self._frame]
def set_minpad(self, minpad):
self._minpad = minpad
def draw(self, renderer, bboxes, ticklabels_bbox_list, visible_ticks):
if not self.get_visible():
return
text_size = renderer.points_to_pixels(self.get_size())
for axis in self.get_visible_axes():
padding = text_size * self.get_minpad(axis)
# Find position of the axis label. For now we pick the mid-point
# along the path but in future we could allow this to be a
# parameter.
x_disp, y_disp = self._frame[axis].pixel[:, 0], self._frame[axis].pixel[:, 1]
d = np.hstack([0., np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))])
xcen = np.interp(d[-1] / 2., d, x_disp)
ycen = np.interp(d[-1] / 2., d, y_disp)
# Find segment along which the mid-point lies
imin = np.searchsorted(d, d[-1] / 2.) - 1
# Find normal of the axis label facing outwards on that segment
normal_angle = self._frame[axis].normal_angle[imin] + 180.
label_angle = (normal_angle - 90.) % 360.
if 135 < label_angle < 225:
label_angle += 180
self.set_rotation(label_angle)
# Find label position by looking at the bounding box of ticks'
# labels and the image. It sets the default padding at 1 times the
# axis label font size which can also be changed by setting
# the minpad parameter.
if isinstance(self._frame, RectangularFrame):
if len(ticklabels_bbox_list) > 0:
ticklabels_bbox = mtransforms.Bbox.union(ticklabels_bbox_list)
else:
ticklabels_bbox = None
if axis == 'l':
if axis in visible_ticks and ticklabels_bbox is not None:
left = ticklabels_bbox.xmin
else:
left = xcen
xpos = left - padding
self.set_position((xpos, ycen))
elif axis == 'r':
if axis in visible_ticks and ticklabels_bbox is not None:
right = ticklabels_bbox.x1
else:
right = xcen
xpos = right + padding
self.set_position((xpos, ycen))
elif axis == 'b':
if axis in visible_ticks and ticklabels_bbox is not None:
bottom = ticklabels_bbox.ymin
else:
bottom = ycen
ypos = bottom - padding
self.set_position((xcen, ypos))
elif axis == 't':
if axis in visible_ticks and ticklabels_bbox is not None:
top = ticklabels_bbox.y1
else:
top = ycen
ypos = top + padding
self.set_position((xcen, ypos))
else: # arbitrary axis
dx = np.cos(np.radians(normal_angle)) * (padding + text_size * 1.5)
dy = np.sin(np.radians(normal_angle)) * (padding + text_size * 1.5)
self.set_position((xcen + dx, ycen + dy))
super(AxisLabels, self).draw(renderer)
bb = super(AxisLabels, self).get_window_extent(renderer)
bboxes.append(bb)
| bsd-3-clause |
yanlend/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, 0.1]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
zetaris/zeppelin | python/src/main/resources/python/bootstrap_sql.py | 60 | 1189 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Setup SQL over Pandas DataFrames
# It requires next dependencies to be installed:
# - pandas
# - pandasql
from __future__ import print_function
try:
from pandasql import sqldf
pysqldf = lambda q: sqldf(q, globals())
except ImportError:
    pysqldf = lambda q: print("Can not run SQL over Pandas DataFrame. " +
                              "Make sure 'pandas' and 'pandasql' libraries are installed")
| apache-2.0 |
rosswhitfield/mantid | Framework/PythonInterface/test/python/mantid/plots/UtilityTest.py | 3 | 1210 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid package
import unittest
from mantid.plots.utility import legend_set_draggable
from unittest.mock import create_autospec
from matplotlib.legend import Legend
class UtilityTest(unittest.TestCase):
def test_legend_set_draggable(self):
legend = create_autospec(Legend)
args = (None, False, 'loc')
legend_set_draggable(legend, *args)
if hasattr(Legend, 'set_draggable'):
legend.set_draggable.assert_called_with(*args)
else:
legend.draggable.assert_called_with(*args)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
bikestra/bdapy | incumbency.py | 1 | 7316 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # Incumbency in Congressional Elections (BDA 14.3) #
#
# This notebook page reproduces analysis done in BDA 14.3, which investigates the advantage of incumbents in congressional elections.
# <codecell>
% pylab inline
from pandas import *
# <markdowncell>
# Here, we read congressional election results from 1902 to 1992 from BDA book website.
# <codecell>
# construct a list of DataFrames for each year
dfs = []
# it seems like years prior to 1990 were not analyzed in the book
for year in range(1900, 1994, 2):
year_df = read_csv("http://www.stat.columbia.edu/~gelman/book/data/incumbency/%d.asc" % year,
delimiter=r"\s+", header=None)
year_df['year'] = year
dfs.append(year_df)
incumbency_df = concat(dfs)
incumbency_df.rename(columns={0:'state',1:'district',2:'incumbency',3:'democratic',4:'republican'},
inplace=True)
# compute proportion of democratic votes
incumbency_df['demoprop'] = incumbency_df['democratic'] / \
(incumbency_df['democratic'] + incumbency_df['republican'])
# <markdowncell>
# The preprocessed data look as follows. The `state` and `district` columns identify the district, while `democratic` and `republican` are the number of votes each party received. `incumbency` is +1 if Democrats controlled the seat before the election and -1 if Republicans controlled it.
# <codecell>
incumbency_df.head()
# <markdowncell>
# For illustration, let us look at how the election result in 1986 is correlated with that in 1988. For this, we create a separate DataFrame for each year.
# <codecell>
df_1986 = incumbency_df.loc[(incumbency_df['year'] == 1986),('state','district','democratic','republican','demoprop')]
df_1988 = incumbency_df.loc[incumbency_df['year'] == 1988,('state','district','democratic','republican','demoprop','incumbency')]
df_1986 = df_1986.rename(columns={'demoprop':'demoprop1986','democratic':'democratic1986','republican':'republican1986'})
df_1988 = df_1988.rename(columns={'demoprop':'demoprop1988','incumbency':'incumbency1988','democratic':'democratic1988','republican':'republican1988'})
# <markdowncell>
# Then, we merge them by district.
# <codecell>
df_1986_1988 = merge(df_1986, df_1988, on=('state', 'district'))
# <markdowncell>
# We are only interested in races where Democratic and Republican candidates competed with each other.
# <codecell>
# keep only races contested by both parties
filtered_df_1986_1988 = df_1986_1988.loc[(df_1986_1988['democratic1986'] > 0) & (df_1986_1988['republican1986'] > 0) &\
(df_1986_1988['democratic1988'] > 0) & (df_1986_1988['republican1988'] > 0)]
# <codecell>
filtered_df_1986_1988.head(20)
# <markdowncell>
# Figure 14.1, which shows the proportion of the Democratic vote in 1988 as a function of that in 1986, is reproduced below. The two variables have a very strong linear relationship, so it seems essential to use the previous election's result as a covariate.
# <codecell>
fig, ax = plt.subplots()
filtered_df_1986_1988[filtered_df_1986_1988['incumbency1988'] != 0].plot('demoprop1986', 'demoprop1988', kind='scatter', marker='.', ax=ax)
filtered_df_1986_1988[filtered_df_1986_1988['incumbency1988'] == 0].plot('demoprop1986', 'demoprop1988', kind='scatter', marker='o', facecolors='none', ax=ax)
ax.set_xlim(-.05,1.05)
ax.set_ylim(-.05,1.05)
ax.set_xlabel('Democratic vote in 1986')
ax.set_ylabel('Democratic vote in 1988')
ax.set_aspect('equal')
plt.show()
# <markdowncell>
# Now, we learn a linear model for each year's election result to estimate the effect of incumbency.
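# <codecell>
# Editor's sketch (not from the original notebook): the model fit in the loop
# below, written out explicitly.  For each election year t the incumbent
# party's vote share is modeled as
#   v_t = b0 + b1*is_incumbent + b2*incumbent_party + b3*v_{t-1} + error,
# and b1 (beta_hat[1,0], collected in `medians`) is the incumbency advantage.
# The loop solves the normal equations (X'X) b = X'y; `ols_normal_equations`
# is a hypothetical stand-alone version of that step.
def ols_normal_equations(X, y):
    X = np.asmatrix(X)
    y = np.asmatrix(y).reshape(-1, 1)
    return np.linalg.solve(X.T * X, X.T * y)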
# <codecell>
# first, come up with the list of years we will learn linear model on
years = unique(incumbency_df['year'])
# <codecell>
medians = []
for index in range(1, len(years)):
now_year = years[index]
prev_year = years[index - 1]
df_prev = incumbency_df.loc[(incumbency_df['year'] == prev_year),('state','district','demoprop','democratic','republican')]
df_now = incumbency_df.loc[incumbency_df['year'] == now_year,('state','district','demoprop','incumbency','democratic','republican')]
df_prev = df_prev.rename(columns={'demoprop':'demoprop_prev','democratic':'democratic_prev','republican':'republican_prev'})
df_now = df_now.rename(columns={'demoprop':'demoprop_now','democratic':'democratic_now','republican':'republican_now'})
df_now['is_incumbent'] = abs(df_now['incumbency'])
df_now['constant'] = 1
df_prev_now = merge(df_prev, df_now, on=('state', 'district'))
df_prev_now = df_prev_now.loc[(df_prev_now['democratic_now'] > 0) & (df_prev_now['republican_now'] > 0) & \
(df_prev_now['democratic_prev'] > 0) & (df_prev_now['republican_prev'] > 0)]
df_prev_now['incumbent_party'] = (df_prev_now['demoprop_prev'] > 0.5).map({True:1,False:-1})
df_prev_now['prevprop'] = df_prev_now['demoprop_prev']
df_prev_now['nowprop'] = df_prev_now['demoprop_now']
df_prev_now.loc[df_prev_now['demoprop_prev'] < 0.5, 'prevprop'] = 1.0 - df_prev_now['demoprop_prev'][df_prev_now['demoprop_prev'] < 0.5]
df_prev_now.loc[df_prev_now['demoprop_prev'] < 0.5, 'nowprop'] = 1.0 - df_prev_now['demoprop_now'][df_prev_now['demoprop_prev'] < 0.5]
X = matrix(df_prev_now.loc[:,('constant', 'is_incumbent', 'incumbent_party','prevprop')].as_matrix())
y = matrix(df_prev_now.loc[:,('nowprop')].as_matrix()).T
XtX = X.T * X
Xty = X.T * y
beta_hat = linalg.solve(XtX, Xty)
medians.append(beta_hat[1,0])
if now_year == 1988:
print beta_hat
print medians
#break
# <codecell>
plt.scatter(years[1:], medians)
# <codecell>
df_prev_now['residual'] = y - X * beta_hat
fig, ax = plt.subplots()
df_prev_now[df_prev_now['is_incumbent'] == 1].plot('prevprop', 'residual', kind='scatter', marker='.', ax=ax)
df_prev_now[df_prev_now['is_incumbent'] == 0].plot('prevprop', 'residual', kind='scatter', marker='o', facecolors='none', ax=ax)
#filtered_df_1986_1988[filtered_df_1986_1988['incumbency1988'] == 0].plot('demoprop1986', 'demoprop1988', kind='scatter', marker='o', facecolors='none', ax=ax)
#ax.set_xlim(-.05,1.05)
#ax.set_ylim(-.05,1.05)
#ax.set_xlabel('Democratic vote in 1986')
#ax.set_ylabel('Democratic vote in 1988')
#ax.set_aspect('equal')
plt.show()
# <codecell>
df_prev_now['prevprop']
# <codecell>
df_prev_now['residual'] = y - X * beta_hat
# <codecell>
df_prev_now.head()
# <codecell>
import statsmodels.api as sm
# <codecell>
est = sm.OLS(y, X)
# <codecell>
est = est.fit()
est.summary()
# <codecell>
prev_year
# <codecell>
df_prev_now['prevprop'] = df_prev_now['demoprop_prev']
df_prev_now['nowprop'] = df_prev_now['demoprop_now']
df_prev_now['prevprop'][df_prev_now['demoprop_prev'] < 0.5] = 1.0 - df_prev_now['demoprop_prev'][df_prev_now['demoprop_prev'] < 0.5]
df_prev_now['nowprop'][df_prev_now['demoprop_prev'] < 0.5] = 1.0 - df_prev_now['demoprop_now'][df_prev_now['demoprop_prev'] < 0.5]
# <codecell>
df_prev_now
# <codecell>
df_prev_now['demoprop_prev'] > 0.5
# <codecell>
df_prev_now
# <codecell>
prev_year
# <codecell>
beta_hat
# <codecell>
now_year
# <codecell>
sum(incumbency_df['democratic'] < 0)
# <codecell>
sum(incumbency_df['republican'] < 0)
# <codecell>
unique(df_1986_1988['democratic1986'])
# <codecell>
| apache-2.0 |
bchareyre/trial | py/plot.py | 5 | 34986 | # encoding: utf-8
# 2008 © Václav Šmilauer <[email protected]>
"""
Module containing utility functions for plotting inside yade. See :ysrc:`examples/simple-scene/simple-scene-plot.py` or :ysrc:`examples/concrete/uniax.py` for examples of usage.
"""
## all exported names
__all__=['data','plots','labels','live','liveInterval','autozoom','plot','reset','resetData','splitData','reverseData','addData','addAutoData','saveGnuplot','saveDataTxt','savePlotSequence']
# multi-threaded support for Tk
# safe to import even if Tk will not be used
import mtTkinter as Tkinter
try:
import Image
except:
try:
import PIL.Image
except:
import warnings
warnings.warn("PIL (python-imaging package) must be installed to use yade.plot")
import matplotlib,os,time,math,itertools
# running in batch
#
# If GtkAgg is the default, X must be working, which is not the case
# with batches (DISPLAY is unset in such case) and importing pylab fails then.
#
# Agg does not require the GUI part and works without any DISPLAY active
# just fine.
#
# see http://www.mail-archive.com/[email protected]/msg04320.html
# and https://lists.launchpad.net/yade-users/msg03289.html
#
import yade.runtime
if not yade.runtime.hasDisplay: matplotlib.use('Agg')
try:
from minieigen import *
except ImportError:
from miniEigen import *
#matplotlib.use('TkAgg')
#matplotlib.use('GTKAgg')
##matplotlib.use('QtAgg')
matplotlib.rc('axes',grid=True) # put grid in all figures
import pylab
data={}
"Global dictionary containing all data values, common for all plots, in the form {'name':[value,...],...}. Data should be added using plot.addData function. All [value,...] columns have the same length, they are padded with NaN if unspecified."
imgData={}
"Dictionary containing lists of strings, which have the meaning of images corresponding to respective :yref:`yade.plot.data` rows. See :yref:`yade.plot.plots` on how to plot images."
plots={} # dictionary x-name -> (yspec,...), where yspec is either y-name or (y-name,'line-specification')
"dictionary x-name -> (yspec,...), where yspec is either y-name or (y-name,'line-specification'). If ``(yspec,...)`` is ``None``, then the plot has meaning of image, which will be taken from respective field of :yref:`yade.plot.imgData`."
labels={}
"Dictionary converting names in data to human-readable names (TeX names, for instance); if a variable is not specified, it is left untranslated."
xylabels={}
"Dictionary of 2-tuples specifying (xlabel,ylabel) for respective plots; if either of them is None, the default auto-generated title is used."
legendLoc=('upper left','upper right')
"Location of the y1 and y2 legends on the plot, if y2 is active."
live=True if yade.runtime.hasDisplay else False
"Enable/disable live plot updating. Disabled by default for now, since it has a few rough edges."
liveInterval=1
"Interval for the live plot updating, in seconds."
autozoom=True
"Enable/disable automatic plot rezooming after data update."
scientific=True if hasattr(pylab,'ticklabel_format') else False ## safe default for older matplotlib versions
"Use scientific notation for axes ticks."
axesWd=0
"Linewidth (in points) to make *x* and *y* axes better visible; not activated if non-positive."
current=-1
"Point that is being tracked with a scatter point. -1 is for the last point, set to *nan* to disable."
afterCurrentAlpha=.2
"Color alpha value for part of lines after :yref:`yade.plot.current`, between 0 (invisible) to 1 (full color)"
scatterMarkerKw=dict(verts=[(0.,0.),(-30.,10.),(-25,0),(-30.,-10.)],marker=None)
"Parameters for the current position marker"
componentSeparator='_'
componentSuffixes={Vector2:{0:'x',1:'y'},Vector3:{0:'x',1:'y',2:'z'},Matrix3:{(0,0):'xx',(1,1):'yy',(2,2):'zz',(0,1):'xy',(0,2):'xz',(1,2):'yz',(1,0):'yx',(2,0):'zx',(2,1):'zy'}}
# if a type with entry in componentSuffixes is given in addData, columns for individual components are synthesized using indices and suffixes given for each type, e.g. foo=Vector3r(1,2,3) will result in columns foox=1,fooy=2,fooz=3
def reset():
"Reset all plot-related variables (data, plots, labels)"
global data, plots, labels # plotLines
data={}; plots={}; imgData={} # plotLines={};
pylab.close('all')
def resetData():
"Reset all plot data; keep plots and labels intact."
global data
data={}
from yade.wrapper import *
def splitData():
"Make all plots discontinuous at this point (adds nan's to all data fields)"
addData({})
def reverseData():
"""Reverse yade.plot.data order.
	Useful for a tension-compression test, where the initial (zero) state is loaded and, to make the data continuous, the last part must *end* in the zero state.
"""
for k in data: data[k].reverse()
def addDataColumns(dd):
'''Add new columns with NaN data, without adding anything to other columns. Does nothing for columns that already exist'''
numSamples=len(data[data.keys()[0]]) if len(data)>0 else 0
for d in dd:
if d in data.keys(): continue
data[d]=[nan for i in range(numSamples)]
def addAutoData():
"""Add data by evaluating contents of :yref:`yade.plot.plots`. Expressions rasing exceptions will be handled gracefully, but warning is printed for each.
>>> from yade import plot
>>> from pprint import pprint
>>> O.reset()
>>> plot.resetData()
>>> plot.plots={'O.iter':('O.time',None,'numParticles=len(O.bodies)')}
>>> plot.addAutoData()
>>> pprint(plot.data)
{'O.iter': [0], 'O.time': [0.0], 'numParticles': [0]}
Note that each item in :yref:`yade.plot.plots` can be
* an expression to be evaluated (using the ``eval`` builtin);
* ``name=expression`` string, where ``name`` will appear as label in plots, and expression will be evaluated each time;
* a dictionary-like object -- current keys are labels of plots and current values are added to :yref:`yade.plot.data`. The contents of the dictionary can change over time, in which case new lines will be created as necessary.
A simple simulation with plot can be written in the following way; note how the energy plot is specified.
>>> from yade import plot, utils
>>> plot.plots={'i=O.iter':(O.energy,None,'total energy=O.energy.total()')}
>>> # we create a simple simulation with one ball falling down
>>> plot.resetData()
>>> O.bodies.append(utils.sphere((0,0,0),1))
0
>>> O.dt=utils.PWaveTimeStep()
>>> O.engines=[
... ForceResetter(),
... GravityEngine(gravity=(0,0,-10),warnOnce=False),
... NewtonIntegrator(damping=.4,kinSplit=True),
... # get data required by plots at every step
... PyRunner(command='yade.plot.addAutoData()',iterPeriod=1,initRun=True)
... ]
>>> O.trackEnergy=True
>>> O.run(2,True)
>>> pprint(plot.data) #doctest: +ELLIPSIS
{'gravWork': [0.0, -25.13274...],
'i': [0, 1],
'kinRot': [0.0, 0.0],
'kinTrans': [0.0, 7.5398...],
'nonviscDamp': [0.0, 10.0530...],
'total energy': [0.0, -7.5398...]}
"""
# this part of docstring does not work with Sphinx
"""
.. plot::
from yade import *
from yade import plot,utils
O.reset()
O.engines=[ForceResetter(),GravityEngine(gravity=(0,0,-10),warnOnce=False),NewtonIntegrator(damping=.4,kinSplit=True),PyRunner(command='yade.plot.addAutoData()',iterPeriod=1,initRun=True)]
O.bodies.append(utils.sphere((0,0,0),1)); O.dt=utils.PWaveTimeStep()
plot.resetData()
plot.plots={'i=O.iter':(O.energy,None,'total energy=O.energy.total()')}
O.trackEnergy=True
O.run(50,True)
import pylab; pylab.grid(True)
plot.legendLoc=('lower left','upper right')
plot.plot(noShow=True)
"""
def colDictUpdate(col,dic):
'update *dic* with the value from col, which is a "expr" or "name=expr" string; all exceptions from ``eval`` are caught and warning is printed without adding any data.'
name,expr=col.split('=',1) if '=' in col else (col,col)
try:
val=eval(expr)
dic.update({name:val})
except:
print 'WARN: ignoring exception raised while evaluating auto-column `'+expr+"'%s."%('' if name==expr else ' ('+name+')')
cols={}
for p in plots:
pp=plots[p]
colDictUpdate(p.strip(),cols)
for y in tuplifyYAxis(plots[p]):
# imgplot specifier
if y==None: continue
yy=addPointTypeSpecifier(y,noSplit=True)[0]
# dict-like object
if hasattr(yy,'keys'): cols.update(dict(yy))
# callable returning list sequence of expressions to evaluate
#elif callable(yy):
# for yyy in yy(): colDictUpdate(yyy,cols)
# plain value
else: colDictUpdate(yy,cols)
addData(cols)
def addData(*d_in,**kw):
"""Add data from arguments name1=value1,name2=value2 to yade.plot.data.
(the old {'name1':value1,'name2':value2} is deprecated, but still supported)
New data will be padded with nan's, unspecified data will be nan (nan's don't appear in graphs).
This way, equal length of all data is assured so that they can be plotted one against any other.
>>> from yade import plot
>>> from pprint import pprint
>>> plot.resetData()
>>> plot.addData(a=1)
>>> plot.addData(b=2)
>>> plot.addData(a=3,b=4)
>>> pprint(plot.data)
{'a': [1, nan, 3], 'b': [nan, 2, 4]}
Some sequence types can be given to addData; they will be saved in synthesized columns for individual components.
>>> plot.resetData()
>>> plot.addData(c=Vector3(5,6,7),d=Matrix3(8,9,10, 11,12,13, 14,15,16))
>>> pprint(plot.data)
{'c_x': [5.0],
'c_y': [6.0],
'c_z': [7.0],
'd_xx': [8.0],
'd_xy': [9.0],
'd_xz': [10.0],
	 'd_yx': [11.0],
	 'd_yy': [12.0],
	 'd_yz': [13.0],
'd_zx': [14.0],
'd_zy': [15.0],
'd_zz': [16.0]}
"""
import numpy
if len(data)>0: numSamples=len(data[data.keys()[0]])
else: numSamples=0
# align with imgData, if there is more of them than data
if len(imgData)>0 and numSamples==0: numSamples=max(numSamples,len(imgData[imgData.keys()[0]]))
d=(d_in[0] if len(d_in)>0 else {})
d.update(**kw)
# handle types composed of multiple values (vectors, matrices)
dNames=d.keys()[:] # make copy, since dict cannot change size if iterated over directly
for name in dNames:
if type(d[name]) in componentSuffixes:
val=d[name]
suffixes=componentSuffixes[type(d[name])]
for ix in suffixes: d[name+componentSeparator+suffixes[ix]]=d[name][ix]
del d[name]
elif hasattr(d[name],'__len__'):
raise ValueError('plot.addData given unhandled sequence type (is a '+type(d[name]).__name__+', must be number or '+'/'.join([k.__name__ for k in componentSuffixes])+')')
for name in d:
if not name in data.keys(): data[name]=[]
for name in data:
data[name]+=(numSamples-len(data[name]))*[nan]
data[name].append(d[name] if name in d else nan)
#print [(k,len(data[k])) for k in data.keys()]
#numpy.array([nan for i in range(numSamples)])
#numpy.append(data[name],[d[name]],1)
def addImgData(**kw):
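	# note: this keeps imgData columns aligned with plot.data -- shorter columns are
	# padded by repeating their last value (or None if they have no value yet), so that
	# every imgData column ends up with one entry per saved data row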
for k in kw:
if k not in imgData: imgData[k]=[]
# align imgData with data
if len(data.keys())>0 and len(imgData.keys())>0:
nData,nImgData=len(data[data.keys()[0]]),len(imgData[imgData.keys()[0]])
#if nImgData>nData-1: raise RuntimeError("imgData is already the same length as data?")
if nImgData<nData-1: # repeat last value
for k in imgData.keys():
lastValue=imgData[k][-1] if len(imgData[k])>0 else None
imgData[k]+=(nData-len(imgData[k])-1)*[lastValue]
elif nData<nImgData:
for k in data.keys():
lastValue=data[k][-1] if len(data[k])>0 else nan
data[k]+=(nImgData-nData)*[lastValue] # add one more, because we will append to imgData below
# add values from kw
newLen=(len(imgData[imgData.keys()[0]]) if imgData else 0)+1 # current length plus 1
for k in kw:
if k in imgData and len(imgData[k])>0: imgData[k]+=(newLen-len(imgData[k])-1)*[imgData[k][-1]]+[kw[k]] # repeat last element as necessary
else: imgData[k]=(newLen-1)*[None]+[kw[k]] # repeat None if no previous value
# align values which were not in kw by repeating the last value
for k in imgData:
if len(imgData[k])<newLen: imgData[k]+=(newLen-len(imgData[k]))*[imgData[k][-1]]
assert(len(set([len(i) for i in imgData.values()]))<=1) # no data or all having the same value
# not public functions
def addPointTypeSpecifier(o,noSplit=False):
"""Add point type specifier to simple variable name; optionally take only the part before '=' from the first item."""
if type(o) in [tuple,list]:
if noSplit or not type(o[0])==str: return o
else: return (o[0].split('=',1)[0],)+tuple(o[1:])
else: return (o if (noSplit or not type(o)==str) else (o.split('=',1)[0]),'')
def tuplifyYAxis(pp):
"""convert one variable to a 1-tuple"""
if type(pp) in [tuple,list]: return pp
else: return (pp,)
def xlateLabel(l):
"Return translated label; return l itself if not in the labels dict."
global labels
if l in labels.keys(): return labels[l]
else: return l
class LineRef:
"""Holds reference to plot line and to original data arrays (which change during the simulation),
and updates the actual line using those data upon request."""
def __init__(self,line,scatter,line2,xdata,ydata,dataName=None):
self.line,self.scatter,self.line2,self.xdata,self.ydata,self.dataName=line,scatter,line2,xdata,ydata,dataName
def update(self):
if isinstance(self.line,matplotlib.image.AxesImage):
# image name
try:
				if len(self.xdata)==0 and self.dataName: self.xdata=imgData[self.dataName] # an empty list references an empty singleton, not the list we want; adjust here
if self.xdata[current]==None: img=Image.new('RGBA',(1,1),(0,0,0,0))
else: img=Image.open(self.xdata[current])
self.line.set_data(img)
except IndexError: pass
else:
# regular data
import numpy
# current==-1 avoids copy slicing data in the else part
if current==None or current==-1 or afterCurrentAlpha==1:
self.line.set_xdata(self.xdata); self.line.set_ydata(self.ydata)
self.line2.set_xdata([]); self.line2.set_ydata([])
else:
try: # try if we can extend the first part by one so that lines are connected
self.xdata[:current+1]; preCurrEnd=current+1
except IndexError: preCurrEnd=current
preCurrEnd=current+(1 if len(self.xdata)>current else 0)
self.line.set_xdata(self.xdata[:preCurrEnd]); self.line.set_ydata(self.ydata[:preCurrEnd])
self.line2.set_xdata(self.xdata[current:]); self.line2.set_ydata(self.ydata[current:])
try:
x,y=self.xdata[current],self.ydata[current]
except IndexError: x,y=0,0
# this could be written in a nicer way, very likely
try:
pt=numpy.ndarray((2,),buffer=numpy.array([float(x),float(y)]))
if self.scatter:
self.scatter.set_offsets(pt)
# change rotation of the marker (possibly incorrect)
try:
dx,dy=self.xdata[current]-self.xdata[current-1],self.ydata[current]-self.ydata[current-1]
# smoothing from last n values, if possible
# FIXME: does not show arrow at all if less than window values
#try:
# window=10
# dx,dy=[numpy.average(numpy.diff(dta[current-window:current])) for dta in self.xdata,self.ydata]
#except IndexError: pass
# there must be an easier way to find on-screen derivative angle, ask on the matplotlib mailing list
axes=self.line.get_axes()
p=axes.patch; xx,yy=p.get_verts()[:,0],p.get_verts()[:,1]; size=max(xx)-min(xx),max(yy)-min(yy)
aspect=(size[1]/size[0])*(1./axes.get_data_ratio())
angle=math.atan(aspect*dy/dx)
if dx<0: angle-=math.pi
self.scatter.set_transform(matplotlib.transforms.Affine2D().rotate(angle))
except IndexError: pass
except TypeError: pass # this happens at i386 with empty data, saying TypeError: buffer is too small for requested array
currLineRefs=[]
liveTimeStamp=0 # timestamp when live update was started, so that the old thread knows to stop if that changes
nan=float('nan')
def createPlots(subPlots=True,scatterSize=60,wider=False):
global currLineRefs
figs=set([l.line.get_axes().get_figure() for l in currLineRefs]) # get all current figures
for f in figs: pylab.close(f) # close those
currLineRefs=[] # remove older plots (breaks live updates of windows that are still open)
if len(plots)==0: return # nothing to plot
if subPlots:
# compute number of rows and colums for plots we have
subCols=int(round(math.sqrt(len(plots)))); subRows=int(math.ceil(len(plots)*1./subCols))
if wider: subRows,subCols=subCols,subRows
for nPlot,p in enumerate(plots.keys()):
pStrip=p.strip().split('=',1)[0]
if not subPlots: pylab.figure()
else: pylab.subplot(subRows,subCols,nPlot)
if plots[p]==None: # image plot
if not pStrip in imgData.keys(): imgData[pStrip]=[]
# fake (empty) image if no data yet
if len(imgData[pStrip])==0 or imgData[pStrip][-1]==None: img=Image.new('RGBA',(1,1),(0,0,0,0))
else: img=Image.open(imgData[pStrip][-1])
img=pylab.imshow(img,origin='lower')
currLineRefs.append(LineRef(img,None,None,imgData[pStrip],None,pStrip))
pylab.gca().set_axis_off()
continue
plots_p=[addPointTypeSpecifier(o) for o in tuplifyYAxis(plots[p])]
plots_p_y1,plots_p_y2=[],[]; y1=True
missing=set() # missing data columns
if pStrip not in data.keys(): missing.add(pStrip)
for d in plots_p:
if d[0]==None:
y1=False; continue
if y1: plots_p_y1.append(d)
else: plots_p_y2.append(d)
if d[0] not in data.keys() and not callable(d[0]) and not hasattr(d[0],'keys'): missing.add(d[0])
if missing:
if len(data.keys())==0 or len(data[data.keys()[0]])==0: # no data at all yet, do not add garbage NaNs
for m in missing: data[m]=[]
else:
print 'Missing columns in plot.data, adding NaN: ',','.join(list(missing))
addDataColumns(missing)
def createLines(pStrip,ySpecs,isY1=True,y2Exists=False):
'''Create data lines from specifications; this code is common for y1 and y2 axes;
it handles y-data specified as callables, which might create additional lines when updated with liveUpdate.
'''
# save the original specifications; they will be smuggled into the axes object
			# the live updater will run yNameFuncs to see if there are new lines to be added
# and will add them if necessary
yNameFuncs=set([d[0] for d in ySpecs if callable(d[0])]) | set([d[0].keys for d in ySpecs if hasattr(d[0],'keys')])
yNames=set()
ySpecs2=[]
for ys in ySpecs:
# ys[0]() must return list of strings, which are added to ySpecs2; line specifier is synthesized by tuplifyYAxis and cannot be specified by the user
if callable(ys[0]): ySpecs2+=[(ret,ys[1]) for ret in ys[0]()]
elif hasattr(ys[0],'keys'): ySpecs2+=[(yy,'') for yy in ys[0].keys()]
else: ySpecs2.append(ys)
if len(ySpecs2)==0:
print 'yade.plot: creating fake plot, since there are no y-data yet'
line,=pylab.plot([nan],[nan])
line2,=pylab.plot([nan],[nan])
currLineRefs.append(LineRef(line,None,line2,[nan],[nan]))
# set different color series for y1 and y2 so that they are recognizable
if pylab.rcParams.has_key('axes.color_cycle'): pylab.rcParams['axes.color_cycle']='b,g,r,c,m,y,k' if not isY1 else 'm,y,k,b,g,r,c'
for d in ySpecs2:
yNames.add(d)
line,=pylab.plot(data[pStrip],data[d[0]],d[1],label=xlateLabel(d[0]))
line2,=pylab.plot([],[],d[1],color=line.get_color(),alpha=afterCurrentAlpha)
# use (0,0) if there are no data yet
scatterPt=[0,0] if len(data[pStrip])==0 else (data[pStrip][current],data[d[0]][current])
# if current value is NaN, use zero instead
scatter=pylab.scatter(scatterPt[0] if not math.isnan(scatterPt[0]) else 0,scatterPt[1] if not math.isnan(scatterPt[1]) else 0,s=scatterSize,color=line.get_color(),**scatterMarkerKw)
currLineRefs.append(LineRef(line,scatter,line2,data[pStrip],data[d[0]]))
axes=line.get_axes()
labelLoc=(legendLoc[0 if isY1 else 1] if y2Exists>0 else 'best')
l=pylab.legend(loc=labelLoc)
if hasattr(l,'draggable'): l.draggable(True)
if scientific:
pylab.ticklabel_format(style='sci',scilimits=(0,0),axis='both')
# fixes scientific exponent placement for y2: https://sourceforge.net/mailarchive/forum.php?thread_name=20101223174750.GD28779%40ykcyc&forum_name=matplotlib-users
if not isY1: axes.yaxis.set_offset_position('right')
if isY1:
pylab.ylabel((', '.join([xlateLabel(_p[0]) for _p in ySpecs2])) if p not in xylabels or not xylabels[p][1] else xylabels[p][1])
pylab.xlabel(xlateLabel(pStrip) if (p not in xylabels or not xylabels[p][0]) else xylabels[p][0])
else:
pylab.ylabel((', '.join([xlateLabel(_p[0]) for _p in ySpecs2])) if (p not in xylabels or len(xylabels[p])<3 or not xylabels[p][2]) else xylabels[p][2])
# if there are callable/dict ySpecs, save them inside the axes object, so that the live updater can use those
if yNameFuncs:
axes.yadeYNames,axes.yadeYFuncs,axes.yadeXName,axes.yadeLabelLoc=yNames,yNameFuncs,pStrip,labelLoc # prepend yade to avoid clashes
createLines(pStrip,plots_p_y1,isY1=True,y2Exists=len(plots_p_y2)>0)
if axesWd>0:
pylab.axhline(linewidth=axesWd,color='k')
pylab.axvline(linewidth=axesWd,color='k')
# create y2 lines, if any
if len(plots_p_y2)>0:
pylab.twinx() # create the y2 axis
createLines(pStrip,plots_p_y2,isY1=False,y2Exists=True)
if 'title' in O.tags.keys(): pylab.title(O.tags['title'])
def liveUpdate(timestamp):
global liveTimeStamp
liveTimeStamp=timestamp
while True:
if not live or liveTimeStamp!=timestamp: return
figs,axes,linesData=set(),set(),set()
for l in currLineRefs:
l.update()
figs.add(l.line.get_figure())
axes.add(l.line.get_axes())
linesData.add(id(l.ydata))
# find callables in y specifiers, create new lines if necessary
for ax in axes:
			if not hasattr(ax,'yadeYFuncs') or not ax.yadeYFuncs: continue # not defined or empty
yy=set();
for f in ax.yadeYFuncs:
if callable(f): yy.update(f())
elif hasattr(f,'keys'): yy.update(f.keys())
else: raise ValueError("Internal error: ax.yadeYFuncs items must be callables or dictionary-like objects and nothing else.")
#print 'callables y names:',yy
news=yy-ax.yadeYNames
if not news: continue
for new in news:
ax.yadeYNames.add(new)
if new in data.keys() and id(data[new]) in linesData: continue # do not add when reloaded and the old lines are already there
print 'yade.plot: creating new line for',new
if not new in data.keys(): data[new]=len(data[ax.yadeXName])*[nan] # create data entry if necessary
#print 'data',len(data[ax.yadeXName]),len(data[new]),data[ax.yadeXName],data[new]
line,=ax.plot(data[ax.yadeXName],data[new],label=xlateLabel(new)) # no line specifier
line2,=ax.plot([],[],color=line.get_color(),alpha=afterCurrentAlpha)
scatterPt=(0 if len(data[ax.yadeXName])==0 or math.isnan(data[ax.yadeXName][current]) else data[ax.yadeXName][current]),(0 if len(data[new])==0 or math.isnan(data[new][current]) else data[new][current])
scatter=ax.scatter(scatterPt[0],scatterPt[1],s=60,color=line.get_color(),**scatterMarkerKw)
currLineRefs.append(LineRef(line,scatter,line2,data[ax.yadeXName],data[new]))
ax.set_ylabel(ax.get_ylabel()+(', ' if ax.get_ylabel() else '')+xlateLabel(new))
# it is possible that the legend has not yet been created
l=ax.legend(loc=ax.yadeLabelLoc)
if hasattr(l,'draggable'): l.draggable(True)
if autozoom:
for ax in axes:
try:
ax.relim() # recompute axes limits
ax.autoscale_view()
except RuntimeError: pass # happens if data are being updated and have not the same dimension at the very moment
for fig in figs:
try:
fig.canvas.draw()
except RuntimeError: pass # happens here too
time.sleep(liveInterval)
def savePlotSequence(fileBase,stride=1,imgRatio=(5,7),title=None,titleFrames=20,lastFrames=30):
'''Save sequence of plots, each plot corresponding to one line in history. It is especially meant to be used for :yref:`yade.utils.makeVideo`.
	:param stride: only consider every stride-th line of history (the default creates one frame for each line)
:param title: Create title frame, where lines of title are separated with newlines (``\\n``) and optional subtitle is separated from title by double newline.
:param int titleFrames: Create this number of frames with title (by repeating its filename), determines how long the title will stand in the movie.
:param int lastFrames: Repeat the last frame this number of times, so that the movie does not end abruptly.
:return: List of filenames with consecutive frames.
'''
createPlots(subPlots=True,scatterSize=60,wider=True)
sqrtFigs=math.sqrt(len(plots))
pylab.gcf().set_size_inches(8*sqrtFigs,5*sqrtFigs) # better readable
pylab.subplots_adjust(left=.05,right=.95,bottom=.05,top=.95) # make it more compact
if len(plots)==1 and plots[plots.keys()[0]]==None: # only pure snapshot is there
pylab.gcf().set_size_inches(5,5)
pylab.subplots_adjust(left=0,right=1,bottom=0,top=1)
#if not data.keys(): raise ValueError("plot.data is empty.")
pltLen=max(len(data[data.keys()[0]]) if data else 0,len(imgData[imgData.keys()[0]]) if imgData else 0)
if pltLen==0: raise ValueError("Both plot.data and plot.imgData are empty.")
global current, currLineRefs
ret=[]
print 'Saving %d plot frames, it can take a while...'%(pltLen)
for i,n in enumerate(range(0,pltLen,stride)):
current=n
for l in currLineRefs: l.update()
out=fileBase+'-%03d.png'%i
pylab.gcf().savefig(out)
ret.append(out)
if len(ret)==0: raise RuntimeError("No images created?!")
if title:
titleImgName=fileBase+'-title.png'
createTitleFrame(titleImgName,Image.open(ret[-1]).size,title)
ret=titleFrames*[titleImgName]+ret
if lastFrames>1: ret+=(lastFrames-1)*[ret[-1]]
return ret
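# A minimal usage sketch for savePlotSequence (added for illustration, not part of the
# original API): the docstring above only states that the returned frame list is meant
# for yade.utils.makeVideo. The file names below and the assumption that makeVideo takes
# the frame list and the output file as its first two arguments are illustrative and
# should be checked against the utils documentation.
def _savePlotSequenceExample(fileBase='/tmp/sim-plot',out='/tmp/sim-plot.avi'):
	'Hypothetical helper: render the plot history to frames and encode them into a video.'
	from yade import utils
	frames=savePlotSequence(fileBase,stride=5,title='My simulation\n\nenergy history')
	utils.makeVideo(frames,out) # assumption: makeVideo(frameList,outputFile,...)
	return out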
def createTitleFrame(out,size,title):
'create figure with title and save to file; a figure object must be opened to get the right size'
pylab.clf(); fig=pylab.gcf()
#insize=fig.get_size_inches(); size=insize[1]*fig.get_dpi(),insize[0]*fig.get_dpi() # this gives wrong dimensions...
#fig.set_facecolor('blue'); fig.patch.set_color('blue'); fig.patch.set_facecolor('blue'); fig.patch.set_alpha(None)
	# the subtitle is optional; avoid an unpacking error when the title contains no double newline
	titleParts=title.split('\n\n'); title,subtitle=titleParts[0],(titleParts[1] if len(titleParts)>1 else '')
lines=[(t,True) for t in title.split('\n')]+([(t,False) for t in subtitle.split('\n')] if subtitle else [])
nLines=len(lines); fontSizes=size[1]/10.,size[1]/16.
import matplotlib.mathtext
def writeLine(text,vertPos,fontsize):
rgba,depth=matplotlib.mathtext.MathTextParser('Bitmap').to_rgba(text,fontsize=fontsize,dpi=fig.get_dpi(),color='blue')
textsize=rgba.shape[1],rgba.shape[0]
if textsize[0]>size[0]:
rgba,depth=matplotlib.mathtext.MathTextParser('Bitmap').to_rgba(text,fontsize=fontsize*size[0]/textsize[0],dpi=fig.get_dpi(),color='blue')
textsize=rgba.shape[1],rgba.shape[0]
fig.figimage(rgba.astype(float)/255.,xo=(size[0]-textsize[0])/2.,yo=vertPos-depth)
ht=size[1]; y0=ht-2*fontSizes[0]; yStep=(ht-2.5*fontSizes[0])/len(lines)
for i,(l,isTitle) in enumerate(lines):
writeLine(l,y0-i*yStep,fontSizes[0 if isTitle else 1])
fig.savefig(out)
def plot(noShow=False,subPlots=True):
"""Do the actual plot, which is either shown on screen (and nothing is returned: if *noShow* is ``False`` - note that your yade compilation should present qt4 feature so that figures can be displayed) or, if *noShow* is ``True``, returned as matplotlib's Figure object or list of them.
You can use
>>> from yade import plot
>>> plot.resetData()
>>> plot.plots={'foo':('bar',)}
>>> plot.plot(noShow=True).savefig('someFile.pdf')
>>> import os
>>> os.path.exists('someFile.pdf')
True
to save the figure to file automatically.
	.. note:: For backwards compatibility reasons, the *noShow* option returns a list of figures when there are multiple figures, but a single figure (rather than a list with one element) when there is only one figure.
"""
createPlots(subPlots=subPlots)
global currLineRefs
figs=set([l.line.get_axes().get_figure() for l in currLineRefs])
if not hasattr(list(figs)[0],'show') and not noShow:
import warnings
warnings.warn('plot.plot not showing figure (matplotlib using headless backend?)')
noShow=True
if not noShow:
if not yade.runtime.hasDisplay: return # would error out with some backends, such as Agg used in batches
if live:
import thread
thread.start_new_thread(liveUpdate,(time.time(),))
# pylab.show() # this blocks for some reason; call show on figures directly
for f in figs:
f.show()
# should have fixed https://bugs.launchpad.net/yade/+bug/606220, but does not work apparently
if 0:
import matplotlib.backend_bases
if 'CloseEvent' in dir(matplotlib.backend_bases):
def closeFigureCallback(event):
ff=event.canvas.figure
# remove closed axes from our update list
global currLineRefs
currLineRefs=[l for l in currLineRefs if l.line.get_axes().get_figure()!=ff]
f.canvas.mpl_connect('close_event',closeFigureCallback)
else:
figs=list(set([l.line.get_axes().get_figure() for l in currLineRefs]))
if len(figs)==1: return figs[0]
else: return figs
def saveDataTxt(fileName,vars=None):
"""Save plot data into a (optionally compressed) text file. The first line contains a comment (starting with ``#``) giving variable name for each of the columns. This format is suitable for being loaded for further processing (outside yade) with ``numpy.genfromtxt`` function, which recognizes those variable names (creating numpy array with named entries) and handles decompression transparently.
>>> from yade import plot
>>> from pprint import pprint
>>> plot.reset()
>>> plot.addData(a=1,b=11,c=21,d=31) # add some data here
>>> plot.addData(a=2,b=12,c=22,d=32)
>>> pprint(plot.data)
{'a': [1, 2], 'b': [11, 12], 'c': [21, 22], 'd': [31, 32]}
>>> plot.saveDataTxt('/tmp/dataFile.txt.bz2',vars=('a','b','c'))
>>> import numpy
>>> d=numpy.genfromtxt('/tmp/dataFile.txt.bz2',dtype=None,names=True)
>>> d['a']
array([1, 2])
>>> d['b']
array([11, 12])
:param fileName: file to save data to; if it ends with ``.bz2`` / ``.gz``, the file will be compressed using bzip2 / gzip.
:param vars: Sequence (tuple/list/set) of variable names to be saved. If ``None`` (default), all variables in :yref:`yade.plot.plot` are saved.
"""
import bz2,gzip
if not vars:
vars=data.keys(); vars.sort()
if fileName.endswith('.bz2'): f=bz2.BZ2File(fileName,'w')
elif fileName.endswith('.gz'): f=gzip.GzipFile(fileName,'w')
else: f=open(fileName,'w')
f.write("# "+"\t\t".join(vars)+"\n")
for i in range(len(data[vars[0]])):
f.write("\t".join([str(data[var][i]) for var in vars])+"\n")
f.close()
def savePylab(baseName,timestamp=False,title=None):
'''This function is not finished, do not use it.'''
import time
if len(data.keys())==0: raise RuntimeError("No data for plotting were saved.")
if timestamp: baseName+=_mkTimestamp()
baseNameNoPath=baseName.split('/')[-1]
saveDataTxt(fileName=baseName+'.data.bz2')
if len(plots)==0: raise RuntimeError("No plots to save, only data saved.")
py=file(baseName+'.py','w')
py.write('#!/usr/bin/env python\n# encoding: utf-8\n# created '+time.asctime()+' ('+time.strftime('%Y%m%d_%H:%M')+')\n#\nimport pylab, numpy\n')
py.write("data=numpy.genfromtxt('%s.data.bz2',dtype=None,names=True)\n"%baseName)
subCols=int(round(math.sqrt(len(plots)))); subRows=int(math.ceil(len(plots)*1./subCols))
for nPlot,p in enumerate(plots.keys()):
pStrip=p.strip().split('=',1)[0]
		if plots[p]==None: continue # image plot, which is not exported
if len(plots)==1: py.write('pylab.figure()\n')
		else: py.write('pylab.subplot(%d,%d,%d)\n'%(subRows,subCols,nPlot))
def _mkTimestamp():
import time
return time.strftime('_%Y%m%d_%H:%M')
def saveGnuplot(baseName,term='wxt',extension=None,timestamp=False,comment=None,title=None,varData=False):
"""Save data added with :yref:`yade.plot.addData` into (compressed) file and create .gnuplot file that attempts to mimick plots specified with :yref:`yade.plot.plots`.
:param baseName: used for creating baseName.gnuplot (command file for gnuplot), associated ``baseName.data.bz2`` (data) and output files (if applicable) in the form ``baseName.[plot number].extension``
	:param term: specify the gnuplot terminal; defaults to ``wxt``, in which case gnuplot will draw persistent windows to screen and terminate; other useful terminals are ``png``, ``cairopdf`` and so on
	:param extension: extension for ``baseName`` output files; defaults to the terminal name, which is fine e.g. for png; if you use ``cairopdf``, you should also say ``extension='pdf'``, however
:param bool timestamp: append numeric time to the basename
:param bool varData: whether file to plot will be declared as variable or be in-place in the plot expression
:param comment: a user comment (may be multiline) that will be embedded in the control file
:return: name of the gnuplot file created.
"""
if len(data.keys())==0: raise RuntimeError("No data for plotting were saved.")
if timestamp: baseName+=_mkTimestamp()
baseNameNoPath=baseName.split('/')[-1]
vars=data.keys(); vars.sort()
saveDataTxt(fileName=baseName+'.data.bz2',vars=vars)
fPlot=file(baseName+".gnuplot",'w')
fPlot.write('#!/usr/bin/env gnuplot\n#\n# created '+time.asctime()+' ('+time.strftime('%Y%m%d_%H:%M')+')\n#\n')
if comment: fPlot.write('# '+comment.replace('\n','\n# ')+'#\n')
dataFile='"< bzcat %s.data.bz2"'%(baseNameNoPath)
if varData:
fPlot.write('dataFile=%s'%dataFile); dataFile='dataFile'
if not extension: extension=term
i=0
for p in plots:
pStrip=p.strip().split('=',1)[0]
if plots[p]==None: continue ## this plot is image plot, which is not applicable to gnuplot
plots_p=[addPointTypeSpecifier(o) for o in tuplifyYAxis(plots[p])]
if term in ['wxt','x11']: fPlot.write("set term %s %d persist\n"%(term,i))
else: fPlot.write("set term %s; set output '%s.%d.%s'\n"%(term,baseNameNoPath,i,extension))
fPlot.write("set xlabel '%s'\n"%xlateLabel(p))
fPlot.write("set grid\n")
fPlot.write("set datafile missing 'nan'\n")
if title: fPlot.write("set title '%s'\n"%title)
y1=True; plots_y1,plots_y2=[],[]
		# replace callable/dict-like data specifiers by their results, if that particular data exists
plots_p2=[]
for pp in plots_p:
if callable(pp[0]): plots_p2+=[(ppp,'') for ppp in pp[0]() if ppp in data.keys()]
elif hasattr(pp[0],'keys'): plots_p2+=[(name,val) for name,val in pp[0].items() if name in data.keys()]
else: plots_p2.append((pp[0],pp[1]))
plots_p=plots_p2
#plots_p=sum([([(pp,'') for pp in p[0]() if pp in data.keys()] if callable(p[0]) else [(p[0],p[1])] ) for p in plots_p],[])
for d in plots_p:
if d[0]==None:
y1=False; continue
if y1: plots_y1.append(d)
else: plots_y2.append(d)
fPlot.write("set ylabel '%s'\n"%(','.join([xlateLabel(_p[0]) for _p in plots_y1])))
if len(plots_y2)>0:
fPlot.write("set y2label '%s'\n"%(','.join([xlateLabel(_p[0]) for _p in plots_y2])))
fPlot.write("set y2tics\n")
ppp=[]
for pp in plots_y1: ppp.append(" %s using %d:%d title '← %s(%s)' with lines"%(dataFile,vars.index(pStrip)+1,vars.index(pp[0])+1,xlateLabel(pp[0]),xlateLabel(pStrip),))
for pp in plots_y2: ppp.append(" %s using %d:%d title '%s(%s) →' with lines axes x1y2"%(dataFile,vars.index(pStrip)+1,vars.index(pp[0])+1,xlateLabel(pp[0]),xlateLabel(pStrip),))
fPlot.write("plot "+",".join(ppp)+"\n")
i+=1
fPlot.close()
return baseName+'.gnuplot'
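# A minimal usage sketch for saveGnuplot (added for illustration): it relies only on the
# documented behaviour above -- the function writes ``baseName.gnuplot`` plus the
# compressed data file and returns the name of the gnuplot command file. The base name
# used here is an arbitrary example; since the command file refers to the data file by
# its basename, gnuplot is run from the directory containing it.
def _saveGnuplotExample(baseName='/tmp/sim'):
	'Hypothetical helper: export the current plot data and render it with gnuplot to png files.'
	import subprocess,os
	gnuplotFile=saveGnuplot(baseName,term='png',extension='png',comment='exported from yade.plot')
	subprocess.call(['gnuplot',os.path.basename(gnuplotFile)],cwd=os.path.dirname(os.path.abspath(gnuplotFile)))
	return gnuplotFile # output images will be written as baseName.<plot number>.png next to it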
| gpl-2.0 |
StongeEtienne/dipy | doc/examples/snr_in_cc.py | 11 | 6528 | """
=============================================
SNR estimation for Diffusion-Weighted Images
=============================================
Computing the Signal-to-Noise-Ratio (SNR) of DW images is still an open question,
as SNR depends on the white matter structure of interest as well as
the gradient direction corresponding to each DWI.
In classical MRI, SNR can be defined as the ratio of the mean
of the signal divided by the standard deviation of the
underlying Gaussian noise, that is SNR = mean(signal) / std(noise).
The noise standard deviation can be
computed from the background in any of the DW images. How do we compute
the mean of the signal, and what signal?
The strategy here is to compute a 'worst-case' SNR for DWI. Several white matter
structures such as the corpus callosum (CC), corticospinal tract (CST), or
the superior longitudinal fasciculus (SLF) can be easily identified from
the colored-FA (cfa) map. In this example, we will use voxels from the CC,
which have the characteristic of being highly RED in the cfa map since they are mainly oriented in
the left-right direction. We know that the DW image
closest to the x-direction will be the one with the most attenuated diffusion signal.
This is the strategy adopted in several recent papers (see [1]_ and [2]_). It gives a good
indication of the quality of the DWI data.
First, we compute the tensor model in a brain mask (see the DTI example for more explanation).
"""
from __future__ import division, print_function
import nibabel as nib
import numpy as np
from dipy.data import fetch_stanford_hardi, read_stanford_hardi
from dipy.segment.mask import median_otsu
from dipy.reconst.dti import TensorModel
fetch_stanford_hardi()
img, gtab = read_stanford_hardi()
data = img.get_data()
affine = img.get_affine()
print('Computing brain mask...')
b0_mask, mask = median_otsu(data)
print('Computing tensors...')
tenmodel = TensorModel(gtab)
tensorfit = tenmodel.fit(data, mask=mask)
"""Next, we set our red-green-blue thresholds to (0.6, 1) in the x axis
and (0, 0.1) in the y and z axes respectively.
These values work well in practice to isolate the very RED voxels of the cfa map.
Then, as assurance, we want just RED voxels in the CC (there could be
noisy red voxels around the brain mask and we don't want those). Unless the brain
acquisition was badly aligned, the CC is always close to the mid-sagittal slice.
The following lines perform these two operations and then saves the computed mask.
"""
print('Computing worst-case/best-case SNR using the corpus callosum...')
from dipy.segment.mask import segment_from_cfa
from dipy.segment.mask import bounding_box
threshold = (0.6, 1, 0, 0.1, 0, 0.1)
CC_box = np.zeros_like(data[..., 0])
mins, maxs = bounding_box(mask)
mins = np.array(mins)
maxs = np.array(maxs)
diff = (maxs - mins) // 4
bounds_min = mins + diff
bounds_max = maxs - diff
CC_box[bounds_min[0]:bounds_max[0],
bounds_min[1]:bounds_max[1],
bounds_min[2]:bounds_max[2]] = 1
mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box,
threshold, return_cfa=True)
cfa_img = nib.Nifti1Image((cfa*255).astype(np.uint8), affine)
mask_cc_part_img = nib.Nifti1Image(mask_cc_part.astype(np.uint8), affine)
nib.save(mask_cc_part_img, 'mask_CC_part.nii.gz')
import matplotlib.pyplot as plt
region = 40
fig = plt.figure('Corpus callosum segmentation')
plt.subplot(1, 2, 1)
plt.title("Corpus callosum (CC)")
plt.axis('off')
red = cfa[..., 0]
plt.imshow(np.rot90(red[region, ...]))
plt.subplot(1, 2, 2)
plt.title("CC mask used for SNR computation")
plt.axis('off')
plt.imshow(np.rot90(mask_cc_part[region, ...]))
fig.savefig("CC_segmentation.png", bbox_inches='tight')
"""
.. figure:: CC_segmentation.png
:align: center
"""
"""Now that we are happy with our crude CC mask that selected voxels in the x-direction,
we can use all the voxels to estimate the mean signal in this region.
"""
mean_signal = np.mean(data[mask_cc_part], axis=0)
"""Now, we need a good background estimation. We will re-use the brain mask
computed before and invert it to catch the outside of the brain. This could
also be determined manually with a ROI in the background.
[Warning: Certain MR manufacturers mask out the outside of the brain with 0's.
One thus has to be careful how the noise ROI is defined].
"""
from scipy.ndimage.morphology import binary_dilation
mask_noise = binary_dilation(mask, iterations=10)
mask_noise[..., :mask_noise.shape[-1]//2] = 1
mask_noise = ~mask_noise
mask_noise_img = nib.Nifti1Image(mask_noise.astype(np.uint8), affine)
nib.save(mask_noise_img, 'mask_noise.nii.gz')
noise_std = np.std(data[mask_noise, :])
print('Noise standard deviation sigma= ', noise_std)
"""We can now compute the SNR for each DWI. For example, report SNR
for DW images with gradient direction that lies the closest to
the X, Y and Z axes.
"""
# Exclude null bvecs from the search
idx = np.sum(gtab.bvecs, axis=-1) == 0
gtab.bvecs[idx] = np.inf
axis_X = np.argmin(np.sum((gtab.bvecs-np.array([1, 0, 0]))**2, axis=-1))
axis_Y = np.argmin(np.sum((gtab.bvecs-np.array([0, 1, 0]))**2, axis=-1))
axis_Z = np.argmin(np.sum((gtab.bvecs-np.array([0, 0, 1]))**2, axis=-1))
for direction in [0, axis_X, axis_Y, axis_Z]:
SNR = mean_signal[direction]/noise_std
if direction == 0 :
print("SNR for the b=0 image is :", SNR)
else :
print("SNR for direction", direction, " ", gtab.bvecs[direction], "is :", SNR)
"""SNR for the b=0 image is : ''42.0695455758''"""
"""SNR for direction 58 [ 0.98875 0.1177 -0.09229] is : ''5.46995373635''"""
"""SNR for direction 57 [-0.05039 0.99871 0.0054406] is : ''23.9329492871''"""
"""SNR for direction 126 [-0.11825 -0.039925 0.99218 ] is : ''23.9965694823''"""
"""
Since the CC is aligned with the X axis, the lowest SNR is for that gradient
direction. In comparison, the DW images in
the perpendicular Y and Z axes have a high SNR. The b0 still exhibits the highest SNR,
since there is no signal attenuation.
Hence, we can say the Stanford diffusion
data has a 'worst-case' SNR of approximately 5, a
'best-case' SNR of approximately 24, and an SNR of 42 on the b0 image.
"""
"""
References:
.. [1] Descoteaux, M., Deriche, R., Le Bihan, D., Mangin, J.-F., and Poupon, C.
Multiple q-shell diffusion propagator imaging.
Medical image analysis, 15(4), 603, 2011.
.. [2] Jones, D. K., Knosche, T. R., & Turner, R.
White Matter Integrity, Fiber Count, and Other Fallacies: The Dos and Don'ts of Diffusion MRI.
NeuroImage, 73, 239, 2013.
"""
| bsd-3-clause |
larsoner/mne-python | mne/utils/__init__.py | 4 | 4476 | # # # WARNING # # #
# This list must also be updated in doc/_templates/autosummary/class.rst if it
# is changed here!
_doc_special_members = ('__contains__', '__getitem__', '__iter__', '__len__',
'__add__', '__sub__', '__mul__', '__div__',
'__neg__', '__hash__')
from ._bunch import Bunch, BunchConst, BunchConstNamed
from .check import (check_fname, check_version, check_random_state,
_check_fname, _check_subject, _check_pandas_installed,
_check_pandas_index_arguments, _check_mayavi_version,
_check_event_id, _check_ch_locs, _check_compensation_grade,
_check_if_nan, _is_numeric, _ensure_int, _check_preload,
_validate_type, _check_info_inv, _check_pylsl_installed,
_check_channels_spatial_filter, _check_one_ch_type,
_check_rank, _check_option, _check_depth, _check_combine,
_check_path_like, _check_src_normal, _check_stc_units,
_check_pyqt5_version, _check_sphere, _check_time_format,
_check_freesurfer_home, _suggest, _require_version,
_on_missing, int_like, _safe_input)
from .config import (set_config, get_config, get_config_path, set_cache_dir,
set_memmap_min_size, get_subjects_dir, _get_stim_channel,
sys_info, _get_extra_data_path, _get_root_dir,
_get_numpy_libs)
from .docs import (copy_function_doc_to_method_doc, copy_doc, linkcode_resolve,
open_docs, deprecated, fill_doc, deprecated_alias,
copy_base_doc_to_subclass_doc)
from .fetching import _fetch_file, _url_to_local_path
from ._logging import (verbose, logger, set_log_level, set_log_file,
use_log_level, catch_logging, warn, filter_out_warnings,
ETSContext, wrapped_stdout, _get_call_line,
ClosingStringIO)
from .misc import (run_subprocess, _pl, _clean_names, pformat, _file_like,
_explain_exception, _get_argvalues, sizeof_fmt,
running_subprocess, _DefaultEventParser,
_assert_no_instances)
from .progressbar import ProgressBar
from ._testing import (run_tests_if_main, run_command_if_main,
requires_sklearn,
requires_version, requires_nibabel, requires_mayavi,
requires_good_network, requires_mne, requires_pandas,
requires_h5py, traits_test, requires_pysurfer,
ArgvSetter, SilenceStdout, has_freesurfer, has_mne_c,
_TempDir, has_nibabel, _import_mlab, buggy_mkl_svd,
requires_numpydoc, requires_vtk, requires_freesurfer,
requires_nitime, requires_dipy,
requires_neuromag2ft, requires_pylsl, assert_object_equal,
assert_and_remove_boundary_annot, _raw_annot,
assert_dig_allclose, assert_meg_snr, assert_snr,
assert_stcs_equal, modified_env, _click_ch_name,
_close_event)
from .numerics import (hashfunc, _compute_row_norms,
_reg_pinv, random_permutation, _reject_data_segments,
compute_corr, _get_inst_data, array_split_idx,
sum_squared, split_list, _gen_events, create_slices,
_time_mask, _freq_mask, grand_average, object_diff,
object_hash, object_size, _apply_scaling_cov,
_undo_scaling_cov, _apply_scaling_array,
_undo_scaling_array, _scaled_array, _replace_md5, _PCA,
_mask_to_onsets_offsets, _array_equal_nan,
_julian_to_cal, _cal_to_julian, _dt_to_julian,
_julian_to_dt, _dt_to_stamp, _stamp_to_dt,
_check_dt, _ReuseCycle)
from .mixin import (SizeMixin, GetEpochsMixin, _prepare_read_metadata,
_prepare_write_metadata, _FakeNoPandas, ShiftTimeMixin)
from .linalg import (_svd_lwork, _repeated_svd, _sym_mat_pow, sqrtm_sym,
dgesdd, dgemm, zgemm, dgemv, ddot, LinAlgError, eigh)
from .dataframe import (_set_pandas_dtype, _scale_dataframe_data,
_convert_times, _build_data_frame)
| bsd-3-clause |
daniloefl/Unfolder | toyModel/closureTest.py | 1 | 1560 | #!/usr/bin/env python3
import itertools
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pymc3 as pm
import matplotlib.cm as cm
import scipy
from Unfolder.ComparisonHelpers import *
from Unfolder.Unfolder import Unfolder
from Unfolder.Histogram import H1D, H2D, plotH1D, plotH2D
from readHistograms import *
sns.set(context = "paper", style = "whitegrid", font_scale=1.1)
varname = "observable"
extension = "eps"
# get histograms from file
truth, recoWithFakes, bkg, mig, eff, nrt = getHistograms(direc = "A")
recoWithoutFakes = mig.project("y")
eff_noerr = H1D(eff)
for k in range(0, len(eff_noerr.err)):
eff_noerr.err[k] = 0
bkg_noerr = H1D(bkg)
for k in range(0, len(bkg_noerr.err)):
bkg_noerr.err[k] = 0
# generate fake data
data = recoWithFakes
# Create unfolding class
m = Unfolder(bkg, mig, eff, truth)
m.setUniformPrior()
#m.setGaussianPrior()
#m.setCurvaturePrior()
#m.setFirstDerivativePrior()
m.run(data)
m.setAlpha(1.0)
m.sample(50000)
# plot marginal distributions
m.plotMarginal("plotMarginal.%s" % extension)
# plot correlations
#m.plotPairs("pairPlot.%s" % extension) # takes forever
m.plotCov("covPlot.%s" % extension)
m.plotCorr("corrPlot.%s" % extension)
m.plotCorrWithNP("corrPlotWithNP.%s" % extension)
m.plotSkewness("skewPlot.%s" % extension)
m.plotKurtosis("kurtosisPlot.%s" % extension)
m.plotNP("plotNP.%s" % extension)
# plot unfolded spectrum
m.plotUnfolded("plotUnfolded.%s" % extension)
m.plotOnlyUnfolded(1.0, False, "", "plotOnlyUnfolded.%s" % extension)
| gpl-3.0 |
dataculture/mca | tests/test_mca.py | 2 | 9155 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_mca
----------------------------------
Tests for `mca` module.
"""
import unittest
from numpy.testing import assert_allclose
from numpy import array
import pandas
from mca import MCA
class TestMca(unittest.TestCase):
def test_abdi_valentin(self):
# Data taken from http://www.utdallas.edu/~herve/Abdi-MCA2007-pretty.pdf
# Multiple Correspondence Analysis
# (Hervé Abdi & Dominique Valentin, 2007)
# See in particular Table 2,3,4.
# first we check the eigenvalues and factor scores with Benzecri
# correction
df = pandas.read_table('data/burgundies.csv', skiprows=1, sep=',',
index_col=0)
mca_df = MCA(df.drop('oak_type', axis=1), ncols=10)
assert_allclose([0.7004, 0.0123, 0.0003], mca_df.E[:3], atol=1e-4)
true_fs_row = [[0.86, 0.08], [-0.71, -0.16], [-0.92, 0.08],
[-0.86, 0.08], [0.92, 0.08], [0.71, -0.16]]
assert_allclose(true_fs_row, mca_df.fs_r(N=2), atol=1e-2)
true_fs_col = [[.90, -.90, -.97, .00, .97, -.90, .90, .90, -.90, -.9,
.90, -.97, .00, .97, -.90, .90, .28, -.28, -.90, .90,
-.90, .9, .90, -.90],
[.00, .00, .18, -.35, .18, .00, .00, .00, .0, .00, .00,
.18, -.35, .18, .00, .00, .0, .00, .00, .00, .00, .00,
.00, .00]]
assert_allclose(array(true_fs_col).T[:-2], mca_df.fs_c(N=2), atol=1e-2)
true_cont_r = [[177, 121, 202, 177, 202, 121],
[83, 333, 83, 83, 83, 333]]
assert_allclose(true_cont_r, 1000*mca_df.cont_r(N=2).T, atol=1)
true_cont_c = [[58, 58, 44, 0, 44, 58, 58, 58, 58, 58, 58, 44, 0, 44,
58, 58, 6, 6, 58, 58, 58, 58],
[0, 0, 83, 333, 83, 0, 0, 0, 0, 0, 0, 83, 333, 83, 0,
0, 0, 0, 0, 0, 0, 0]]
assert_allclose(true_cont_c, 1000*mca_df.cont_c(N=2).T, atol=1)
# I declined to include a test for the cos_c and cos_r functions because
        # I think the source itself is mistaken. In Abdi-MCA2007-pretty.pdf, as
        # elsewhere, the formula for the squared cosine is f**2/d**2. This does not
# agree with tables 3 and 4. In table 3 the squared cosine is derived from
# f**2/I where I = 1.2 is the inertia before Benzecri correction. I have no
# idea how the squared cosines in table 4 were derived. My formula, however
# does comport with the figures given in (Abdi & Bera, 2014), tested next.
# oak = pandas.DataFrame([1,2,2,2,1,1], columns=['oak_type'])
# print(dummy(oak))
# mca_df.fs_c_sup(dummy(oak))
# ... then without Benzecri correction
mca_df_i = MCA(df.drop('oak_type', axis=1), ncols=10, benzecri=False)
assert_allclose([0.8532, 0.2, 0.1151, 0.0317],
(mca_df_i.s**2)[:4], atol=1e-4)
# check percentage of explained variance both with and without Benzecri
# and Greenacre corrections
true_expl_var_i = [.7110, .1667, .0959, .0264, 0., 0.]
true_expl_var_z = [.9823, .0173, .0004, 0., 0., 0.]
true_expl_var_c = [.9519, .0168, .0004, 0., 0., 0.]
assert_allclose(mca_df_i.expl_var(False), true_expl_var_i, atol=1e-4)
assert_allclose(mca_df_i.expl_var(), true_expl_var_c, atol=1e-4)
assert_allclose(mca_df.expl_var(False), true_expl_var_z, atol=1e-4)
assert_allclose(mca_df.expl_var(), true_expl_var_c, atol=1e-4)
def test_abdi_bera(self):
# Data taken from www.utdallas.edu/~herve/abdi-AB2014_CA.pdf
# Correspondence Analysis, (Herve Abdi & Michel Bera, 2014)
# Springer Encyclopedia of Social Networks and Mining.
df = pandas.read_table('data/music_color.csv', skiprows=0, index_col=0,
sep=',')
mca_df = MCA(df, benzecri=False)
# Table 1, page 13
assert_allclose(mca_df.r, [.121, .091, .126, .116, .096, .066, .071,
.146, .061, .106], atol=1e-3)
assert_allclose(mca_df.c, [.11, .11, .11, .11, .11, .11, .11, .11, .11],
atol=1e-2)
# Table 2, page 14
assert_allclose(mca_df.fs_r(N=2), [[-0.026, 0.299], [-0.314, 0.232],
[-0.348, 0.202], [-0.044, -0.490],
[-0.082, -0.206], [-0.619, 0.475],
[-0.328, 0.057], [1.195, 0.315],
[-0.57, 0.3], [0.113, -0.997]],
atol=1e-3)
assert_allclose(mca_df.cont_r(N=2)*1000, [[0, 56], [31, 25], [53, 27],
[1, 144], [2, 21], [87, 77],
[26, 1], [726, 75], [68, 28],
[5, 545]],
atol=1)
assert_allclose(mca_df.cos_r(N=2)*1000,
[[3, 410], [295, 161], [267, 89], [5, 583], [13, 81],
[505, 298], [77, 2], [929, 65], [371, 103], [12, 973]],
atol=1)
# Table 3, page 17
assert_allclose(mca_df.fs_c(N=2),
[[-0.541, 0.386], [-.257, .275], [-.291, -.309],
[.991, .397], [-.122, -.637], [-.236, .326],
[.954, -.089], [-.427, .408], [-.072, -.757]],
atol=1e-3)
assert_allclose(mca_df.cont_c(N=2)*1000,
[[113, 86], [25, 44], [33, 55],
[379, 91], [6, 234], [22, 61],
[351, 5], [70, 96], [2, 330]],
atol=1)
assert_allclose(mca_df.cos_c(N=2)*1000,
[[454, 232], [105, 121], [142, 161], [822, 132],
[26, 709], [78, 149], [962, 8], [271, 249], [7, 759]],
atol=1)
assert_allclose(mca_df.L[:2], [.287, .192], atol=2e-3)
self.assertAlmostEqual(mca_df.inertia, 0.746, 3)
def test_abdi_williams(self):
# Data taken from www.utdallas.edu/~herve/abdi-CorrespondenceAnaysis2010-pretty.pdf
# Correspondence Analysis, (Herve Abdi & Michel Bera, 2010)
# SAGE Encyclopedia of Research Design. Table 4, page 16.
df = pandas.read_table('data/french_writers.csv', skiprows=0,
index_col=0, sep=',')
mca_df = MCA(df, benzecri=False)
assert_allclose(mca_df.c, [.2973, .5642, .1385], atol=1e-4)
assert_allclose(mca_df.r, [.0189, .1393, .2522, .3966, .1094, .0835],
atol=1e-4)
true_fs_row = [[0.2398, 0.1895, 0.1033, -0.0918, -0.2243, 0.0475],
[0.0741, 0.1071, -0.0297, 0.0017, 0.0631, -0.1963]]
assert_allclose(mca_df.fs_r(N=2).T, true_fs_row, atol=1e-4)
assert_allclose(mca_df.L, [.0178, .0056], atol=1e-4)
assert_allclose(-mca_df.fs_c(N=2).T, [[-0.0489, 0.0973, -0.2914],
[.1115, -0.0367, -0.0901]],
atol=1e-4)
true_cont_r = [[0.0611, 0.2807, 0.1511, 0.1876, 0.3089, 0.0106],
[0.0186, 0.2864, 0.0399, 0.0002, 0.0781, 0.5767]]
assert_allclose(mca_df.cont_r(N=2).T, true_cont_r, atol=1e-4)
true_cos_r = [[0.9128, 0.7579, 0.9236, 0.9997, 0.9266, 0.0554],
[0.0872, 0.2421, 0.0764, 0.0003, 0.0734, 0.9446]]
assert_allclose(mca_df.cos_r(N=2).T, true_cos_r, atol=1e-4)
assert_allclose(mca_df.cont_c(N=2).T, [[0.0399, 0.2999, 0.6601],
[0.6628, 0.1359, 0.2014]],
atol=1e-4)
assert_allclose(mca_df.cos_c(N=2).T, [[0.1614, 0.8758, 0.9128],
[0.8386, 0.1242, 0.0872]],
atol=1e-4)
assert_allclose(mca_df.dc, [0.0148, 0.0108, 0.0930], atol=1e-4)
assert_allclose(mca_df.dr,
[0.0630, 0.0474, 0.0116, 0.0084, 0.0543, 0.0408],
atol=1e-4)
# abdi = numpy.array([216, 139, 26]) #
abdi = pandas.DataFrame([216, 139, 26]).T
assert_allclose(mca_df.fs_r_sup(abdi, 2), [[-0.0908, 0.5852]],
atol=1e-4)
supp = pandas.read_table('data/french_writers_supp.csv', skiprows=0,
index_col=0, sep=',')
true_fs_col_sup = [[-0.0596, -0.1991, -0.4695, -0.4008],
[0.2318, 0.2082, -0.2976, -0.4740]]
assert_allclose(mca_df.fs_c_sup(supp).T, true_fs_col_sup, atol=1e-3)
def test_invalid_inputs(self):
df = pandas.read_table('data/burgundies.csv', skiprows=1, sep=',')
self.assertRaises(ValueError, MCA, df.iloc[:, 2:], ncols=0)
self.assertRaises(ValueError, MCA, df.iloc[:, 2:], ncols='')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
kapteyn-astro/kapteyn | doc-org/conf.py | 1 | 10347 | # -*- coding: utf-8 -*-
#
# Kapteyn Package documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 3 10:34:58 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# NOTES:
# ------
# * This configuration file has been modified on various occasions.
#
# * Building the documentation requires a lot of memory; at the moment
#   6 GB is sufficient. Machines with less memory can easily hang.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('../PACKAGE/kapteyn'))
sys.path.append(os.path.abspath('.'))
from kapteyn import __version__ as kapteynversion
print 'Kapteyn Package version', kapteynversion
# =============================================================================
# -- Options to choose from ---------------------------------------------------
# =============================================================================
# -- renderer for mathematical formulas:
#
math_ext = 'sphinx.ext.mathjax' # MathJax (JavaCript-based)
# explicit Kapteyn webserver
# mathjax_path = 'http://www.astro.rug.nl/MathJax/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
# CDN server:
# mathjax_path = 'http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
if math_ext=='sphinx.ext.mathjax':
# same server:
mathjax_path = '/MathJax/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
#math_ext = 'sphinx.ext.pngmath' # PNG images
# -- plot directive script:
#
#plot_ext = 'matplotlib.sphinxext.plot_directive' # 'official'
plot_ext = 'plot_directive' # local
# =============================================================================
# -- General configuration ----------------------------------------------------
# =============================================================================
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', math_ext, 'sphinx.ext.ifconfig',
plot_ext, 'only_directives']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'Kapteyn Package'
copyright = u'2010-2015, Kapteyn Astronomical Institute'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = kapteynversion
# The full version, including alpha/beta/rc tags.
release = kapteynversion
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
unused_docs = ['ascarray']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# =============================================================================
# -- Options for HTML output ---------------------------------------------------
# =============================================================================
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
html_theme = 'kapteyn-theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'stickysidebar': True,
'rightsidebar': True,
'relbarbgcolor': 'brown',
'sidebarbgcolor': 'darkgreen',
'headtextcolor': 'black'
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Kapteyn Package'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
###html_logo = 'KapteynLogo-s.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'Kapteyn.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_sidebars = {'index': ['indexsidebar.html', 'sidebarextra.html',
'searchbox.html', 'sidebarbottom.html'],
'**': ['localtoc.html', 'searchbox.html',
'sidebarextra.html', 'sidebarbottom.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'kapteyn'
#
# Transparent formulas. Note: different from Sphinx documentation
# stating '-bg Transparent' which did not work. May be due to the
# subprocess.Popen class. -D reduced because of smaller font used.
#
pngmath_dvipng_args = ['-gamma 1.5', '-D 95', '-bg', 'Transparent']
# =============================================================================
# -- Options for LaTeX output -------------------------------------------------
# =============================================================================
# fancy chapter headers:
fncychap = """\\usepackage[Conny]{fncychap}
\\ChTitleAsIs
\\ChRuleWidth{1pt}"""
latex_elements = {
'papersize' : 'a4paper',
'pointsize' : '10pt',
'fncychap' : fncychap # fancy chapter headers
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'kapteyn.tex', u'Kapteyn Package Documentation',
u'Hans Terlouw\\\\Martin Vogelaar', 'manual', False),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'KapteynLogo.jpg'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
latex_use_parts = True
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
latex_domain_indices = False
# =============================================================================
# -- Special configuration and modifications ---------------------------------
# =============================================================================
# ----------------------------------------------------------------------------
# Plot directive configuration
# ----------------------------------------------------------------------------
plot_formats = [('png', 80), ('hires.png', 200), ('pdf', 50)]
plot_rcparams = {'figure.figsize': (5.5, 4.5)}
# Introduce 'author' directive
#
from docutils.parsers.rst import directives
from sphinx.directives import Author
directives.register_directive('author', Author)
| bsd-3-clause |
UDST/orca | orca/server/tests/test_server.py | 2 | 10740 | import json
import orca
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from .. import server
@pytest.fixture
def tapp():
server.app.config['TESTING'] = True
return server.app.test_client()
@pytest.fixture(scope='module')
def dfa():
return pd.DataFrame(
{'a': [100, 200, 300, 200, 100]},
index=['v', 'w', 'x', 'y', 'z'])
@pytest.fixture(scope='module')
def dfb():
return pd.DataFrame(
{'b': [70, 80, 90],
'a_id': ['w', 'v', 'z']},
index=['a', 'b', 'b'])
@pytest.fixture(scope='module')
def dfa_col(dfa):
return pd.Series([2, 4, 6, 8, 10], index=dfa.index)
@pytest.fixture(scope='module')
def dfb_col(dfb):
return pd.Series([10, 20, 30], index=dfb.index)
@pytest.fixture(scope='module')
def dfa_factor():
return 0.5
@pytest.fixture(scope='module')
def dfb_factor():
return 2
@pytest.fixture(scope='module', autouse=True)
def setup_orca(dfa, dfb, dfa_col, dfb_col, dfa_factor, dfb_factor):
orca.add_injectable('a_factor', dfa_factor)
@orca.injectable()
def b_factor():
return dfb_factor
orca.add_table('dfa', dfa)
@orca.table('dfb')
def dfb_table():
return dfb
orca.add_column('dfa', 'acol', dfa_col)
orca.add_column('dfb', 'bcol', dfb_col)
@orca.column('dfa')
def extra_acol(a_factor):
return dfa_col * a_factor
@orca.column('dfb')
def extra_bcol(b_factor):
return dfb_col * b_factor
orca.broadcast('dfb', 'dfa', cast_on='a_id', onto_index=True)
@orca.step()
def test_step(dfa, dfb):
pass
def test_schema(tapp):
rv = tapp.get('/schema')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert set(data['tables']) == {'dfa', 'dfb'}
assert set(data['columns']['dfa']) == {'extra_acol', 'acol', 'a'}
assert set(data['columns']['dfb']) == {'bcol', 'extra_bcol', 'a_id', 'b'}
assert data['steps'] == ['test_step']
assert set(data['injectables']) == {'a_factor', 'b_factor'}
assert data['broadcasts'] == [['dfb', 'dfa']]
def test_list_tables(tapp):
rv = tapp.get('/tables')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert set(data['tables']) == {'dfa', 'dfb'}
def test_table_info(tapp):
rv = tapp.get('/tables/dfa/info')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
assert 'extra_acol' in data
def test_table_preview(tapp):
rv = tapp.get('/tables/dfa/preview')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
assert data == orca.get_table('dfa').to_frame().to_json(orient='split')
def test_table_preview_404(tapp):
rv = tapp.get('/tables/not_a_table/preview')
assert rv.status_code == 404
def test_table_describe(tapp):
rv = tapp.get('/tables/dfa/describe')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
assert data == (orca.get_table('dfa')
.to_frame()
.describe()
.to_json(orient='split'))
def test_table_definition_frame(tapp):
rv = tapp.get('/tables/dfa/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'type': 'dataframe'}
def test_table_definition_func(tapp):
rv = tapp.get('/tables/dfb/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data['type'] == 'function'
assert data['filename'].endswith('test_server.py')
assert isinstance(data['lineno'], int)
assert data['text'] == (
" @orca.table('dfb')\n"
" def dfb_table():\n"
" return dfb\n")
assert 'dfb_table' in data['html']
def test_table_csv(tapp):
rv = tapp.get('/tables/dfb/csv')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
assert rv.mimetype == 'text/csv'
assert data == orca.get_table('dfb').to_frame().to_csv()
def test_list_table_columns(tapp):
rv = tapp.get('/tables/dfb/columns')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert set(data['columns']) == {'a_id', 'b', 'bcol', 'extra_bcol'}
def test_column_definition_local(tapp):
rv = tapp.get('/tables/dfa/columns/a/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'type': 'local'}
def test_column_definition_series(tapp):
rv = tapp.get('/tables/dfa/columns/acol/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'type': 'series'}
def test_column_definition_func(tapp):
rv = tapp.get('/tables/dfa/columns/extra_acol/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data['type'] == 'function'
assert data['filename'].endswith('test_server.py')
assert isinstance(data['lineno'], int)
assert data['text'] == (
" @orca.column('dfa')\n"
" def extra_acol(a_factor):\n"
" return dfa_col * a_factor\n")
assert 'extra_acol' in data['html']
def test_column_describe(tapp):
rv = tapp.get('/tables/dfa/columns/extra_acol/describe')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
assert data == (orca.get_table('dfa')
.extra_acol.describe()
.to_json(orient='split'))
def test_column_csv(tapp, dfa):
rv = tapp.get('/tables/dfa/columns/a/csv')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
assert data == dfa.a.to_csv(path_or_buf=None)
def test_no_column_404(tapp):
rv = tapp.get('/tables/dfa/columns/not-a-column/csv')
assert rv.status_code == 404
def test_list_injectables(tapp):
rv = tapp.get('/injectables')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert set(data['injectables']) == {'a_factor', 'b_factor'}
def test_injectable_repr(tapp, dfb_factor):
rv = tapp.get('/injectables/b_factor/repr')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'type': str(type(42)), 'repr': '2'}
def test_no_injectable_404(tapp):
rv = tapp.get('/injectables/nope/repr')
assert rv.status_code == 404
def test_injectable_definition_var(tapp):
rv = tapp.get('/injectables/a_factor/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'type': 'variable'}
def test_injectable_definition_func(tapp):
rv = tapp.get('/injectables/b_factor/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data['type'] == 'function'
assert data['filename'].endswith('test_server.py')
assert isinstance(data['lineno'], int)
assert data['text'] == (
" @orca.injectable()\n"
" def b_factor():\n"
" return dfb_factor\n")
assert 'b_factor' in data['html']
def test_list_broadcasts(tapp):
rv = tapp.get('/broadcasts')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'broadcasts': [{'cast': 'dfb', 'onto': 'dfa'}]}
def test_broadcast_definition(tapp):
rv = tapp.get('/broadcasts/dfb/dfa/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {
'cast': 'dfb',
'onto': 'dfa',
'cast_on': 'a_id',
'onto_on': None,
'cast_index': False,
'onto_index': True}
def test_no_broadcast_404(tapp):
rv = tapp.get('/broadcasts/table1/table2/definition')
assert rv.status_code == 404
def test_list_steps(tapp):
rv = tapp.get('/steps')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data == {'steps': ['test_step']}
def test_no_step_404(tapp):
rv = tapp.get('/steps/not_a_step/definition')
assert rv.status_code == 404
def test_step_definition(tapp):
rv = tapp.get('/steps/test_step/definition')
assert rv.status_code == 200
data = json.loads(rv.data.decode('utf-8'))
assert data['filename'].endswith('test_server.py')
assert isinstance(data['lineno'], int)
assert data['text'] == (
" @orca.step()\n"
" def test_step(dfa, dfb):\n"
" pass\n")
assert 'test_step' in data['html']
def test_table_groupbyagg_errors(tapp):
    # non-existent column
rv = tapp.get('/tables/dfa/groupbyagg?column=notacolumn')
assert rv.status_code == 400
# both by and level missing
rv = tapp.get('/tables/dfa/groupbyagg?column=a')
assert rv.status_code == 400
# bad or missing agg type
rv = tapp.get('/tables/dfa/groupbyagg?column=a&level=0&agg=notanagg')
assert rv.status_code == 400
def test_table_groupbyagg_by_size(tapp):
rv = tapp.get('/tables/dfa/groupbyagg?by=a&column=a&agg=size')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
test = pd.read_json(data, orient='split', typ='series')
pdt.assert_series_equal(
test,
pd.Series([2, 2, 1], index=[100, 200, 300]),
check_names=False)
def test_table_groupbyagg_level_mean(tapp):
rv = tapp.get('/tables/dfb/groupbyagg?level=0&column=b&agg=mean')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
test = pd.read_json(data, orient='split', typ='series')
pdt.assert_series_equal(
test,
pd.Series([70, 85], index=['a', 'b'], name='b'))
def test_table_groupbyagg_level_median(tapp):
rv = tapp.get('/tables/dfb/groupbyagg?level=0&column=b&agg=median')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
test = pd.read_json(data, orient='split', typ='series')
pdt.assert_series_equal(
test,
pd.Series([70, 85], index=['a', 'b'], name='b'))
def test_table_groupbyagg_level_sum(tapp):
rv = tapp.get('/tables/dfb/groupbyagg?level=0&column=b&agg=sum')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
test = pd.read_json(data, orient='split', typ='series')
pdt.assert_series_equal(
test,
pd.Series([70, 170], index=['a', 'b'], name='b'))
def test_table_groupbyagg_level_std(tapp):
rv = tapp.get('/tables/dfb/groupbyagg?level=0&column=b&agg=std')
assert rv.status_code == 200
data = rv.data.decode('utf-8')
test = pd.read_json(data, orient='split', typ='series')
pdt.assert_series_equal(
test,
pd.Series(
[np.nan, pd.Series([80, 90]).std()],
index=['a', 'b'], name='b'))
| bsd-3-clause |
detrout/debian-statsmodels | statsmodels/tsa/tests/test_seasonal.py | 27 | 9216 | import numpy as np
from numpy.testing import assert_almost_equal, assert_equal, assert_raises
from statsmodels.tsa.seasonal import seasonal_decompose
from pandas import DataFrame, DatetimeIndex
class TestDecompose:
@classmethod
def setupClass(cls):
# even
data = [-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,
530, 489, 540, 457, 195, 176, 337, 239, 128, 102,
232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184]
cls.data = DataFrame(data, DatetimeIndex(start='1/1/1951',
periods=len(data),
freq='Q'))
def test_ndarray(self):
res_add = seasonal_decompose(self.data.values, freq=4)
seasonal = [62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25, 62.46, 86.17,
-88.38, -60.25, 62.46, 86.17, -88.38, -60.25,
62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, 131.62, np.nan, np.nan]
random = [np.nan, np.nan, 78.254, 70.254, -36.710, -94.299, -6.371,
-62.246, 105.415, 103.576, 2.754, 1.254, 15.415, -10.299,
-33.246, -27.746, 46.165, -57.924, 28.004, -36.746,
-37.585, 151.826, -75.496, 86.254, -10.210, -194.049,
48.129, 11.004, -40.460, 143.201, np.nan, np.nan]
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, random, 3)
res_mult = seasonal_decompose(np.abs(self.data.values), 'm', freq=4)
seasonal = [1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716,
0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538,
0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815,
1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931,
1.0815, 1.5538, 0.6716, 0.6931]
trend = [np.nan, np.nan, 171.62, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 107.25, 80.50, 79.12, 78.75, 116.50,
140.00, 157.38, np.nan, np.nan]
random = [np.nan, np.nan, 1.29263, 1.51360, 1.03223, 0.62226,
1.04771, 1.05139, 1.20124, 0.84080, 1.28182, 1.28752,
1.08043, 0.77172, 0.91697, 0.96191, 1.36441, 0.72986,
1.01171, 0.73956, 1.03566, 1.44556, 0.02677, 1.31843,
0.49390, 1.14688, 1.45582, 0.16101, 0.82555, 1.47633,
np.nan, np.nan]
assert_almost_equal(res_mult.seasonal, seasonal, 4)
assert_almost_equal(res_mult.trend, trend, 2)
assert_almost_equal(res_mult.resid, random, 4)
# test odd
res_add = seasonal_decompose(self.data.values[:-1], freq=4)
seasonal = [68.18, 69.02, -82.66, -54.54, 68.18, 69.02, -82.66,
-54.54, 68.18, 69.02, -82.66, -54.54, 68.18, 69.02,
-82.66, -54.54, 68.18, 69.02, -82.66, -54.54, 68.18,
69.02, -82.66, -54.54, 68.18, 69.02, -82.66, -54.54,
68.18, 69.02, -82.66]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, np.nan, np.nan]
random = [np.nan, np.nan, 72.538, 64.538, -42.426, -77.150,
-12.087, -67.962, 99.699, 120.725, -2.962, -4.462,
9.699, 6.850, -38.962, -33.462, 40.449, -40.775, 22.288,
-42.462, -43.301, 168.975, -81.212, 80.538, -15.926,
-176.900, 42.413, 5.288, -46.176, np.nan, np.nan]
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, random, 3)
def test_pandas(self):
res_add = seasonal_decompose(self.data, freq=4)
seasonal = [62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25, 62.46, 86.17,
-88.38, -60.25, 62.46, 86.17, -88.38, -60.25,
62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, 131.62, np.nan, np.nan]
random = [np.nan, np.nan, 78.254, 70.254, -36.710, -94.299, -6.371,
-62.246, 105.415, 103.576, 2.754, 1.254, 15.415, -10.299,
-33.246, -27.746, 46.165, -57.924, 28.004, -36.746,
-37.585, 151.826, -75.496, 86.254, -10.210, -194.049,
48.129, 11.004, -40.460, 143.201, np.nan, np.nan]
assert_almost_equal(res_add.seasonal.values.squeeze(), seasonal, 2)
assert_almost_equal(res_add.trend.values.squeeze(), trend, 2)
assert_almost_equal(res_add.resid.values.squeeze(), random, 3)
assert_equal(res_add.seasonal.index.values.squeeze(),
self.data.index.values)
res_mult = seasonal_decompose(np.abs(self.data), 'm', freq=4)
seasonal = [1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716,
0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815, 1.5538,
0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931, 1.0815,
1.5538, 0.6716, 0.6931, 1.0815, 1.5538, 0.6716, 0.6931,
1.0815, 1.5538, 0.6716, 0.6931]
trend = [np.nan, np.nan, 171.62, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 107.25, 80.50, 79.12, 78.75, 116.50,
140.00, 157.38, np.nan, np.nan]
random = [np.nan, np.nan, 1.29263, 1.51360, 1.03223, 0.62226,
1.04771, 1.05139, 1.20124, 0.84080, 1.28182, 1.28752,
1.08043, 0.77172, 0.91697, 0.96191, 1.36441, 0.72986,
1.01171, 0.73956, 1.03566, 1.44556, 0.02677, 1.31843,
0.49390, 1.14688, 1.45582, 0.16101, 0.82555, 1.47633,
np.nan, np.nan]
assert_almost_equal(res_mult.seasonal.values.squeeze(), seasonal, 4)
assert_almost_equal(res_mult.trend.values.squeeze(), trend, 2)
assert_almost_equal(res_mult.resid.values.squeeze(), random, 4)
assert_equal(res_mult.seasonal.index.values.squeeze(),
self.data.index.values)
def test_filt(self):
filt = np.array([1/8., 1/4., 1./4, 1/4., 1/8.])
res_add = seasonal_decompose(self.data.values, filt=filt, freq=4)
seasonal = [62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25, 62.46, 86.17,
-88.38, -60.25, 62.46, 86.17, -88.38, -60.25,
62.46, 86.17, -88.38, -60.25, 62.46, 86.17, -88.38,
-60.25, 62.46, 86.17, -88.38, -60.25]
trend = [np.nan, np.nan, 159.12, 204.00, 221.25, 245.12, 319.75,
451.50, 561.12, 619.25, 615.62, 548.00, 462.12, 381.12,
316.62, 264.00, 228.38, 210.75, 188.38, 199.00, 207.12,
191.00, 166.88, 72.00, -9.25, -33.12, -36.75, 36.25,
103.00, 131.62, np.nan, np.nan]
random = [np.nan, np.nan, 78.254, 70.254, -36.710, -94.299, -6.371,
-62.246, 105.415, 103.576, 2.754, 1.254, 15.415, -10.299,
-33.246, -27.746, 46.165, -57.924, 28.004, -36.746,
-37.585, 151.826, -75.496, 86.254, -10.210, -194.049,
48.129, 11.004, -40.460, 143.201, np.nan, np.nan]
assert_almost_equal(res_add.seasonal, seasonal, 2)
assert_almost_equal(res_add.trend, trend, 2)
assert_almost_equal(res_add.resid, random, 3)
def test_raises(self):
assert_raises(ValueError, seasonal_decompose, self.data.values)
assert_raises(ValueError, seasonal_decompose, self.data, 'm',
freq=4)
x = self.data.astype(float).copy()
x.ix[2] = np.nan
assert_raises(ValueError, seasonal_decompose, x)
| bsd-3-clause |
qilicun/python | python2/plotmap.py | 1 | 2234 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
try:
from mpl_toolkits.basemap import Basemap
have_basemap = True
except ImportError:
have_basemap = False
def plotmap():
# create figure
fig = plt.figure(figsize=(8,8))
# set up orthographic map projection with
# perspective of satellite looking down at 50N, 100W.
# use low resolution coastlines.
map = Basemap(projection='ortho',lat_0=50,lon_0=-100,resolution='l')
# lat/lon coordinates of five cities.
lats=[40.02,32.73,38.55,48.25,17.29]
lons=[-105.16,-117.16,-77.00,-114.21,-88.10]
cities=['Boulder, CO','San Diego, CA',
'Washington, DC','Whitefish, MT','Belize City, Belize']
# compute the native map projection coordinates for cities.
xc,yc = map(lons,lats)
# make up some data on a regular lat/lon grid.
nlats = 73; nlons = 145; delta = 2.*np.pi/(nlons-1)
lats = (0.5*np.pi-delta*np.indices((nlats,nlons))[0,:,:])
lons = (delta*np.indices((nlats,nlons))[1,:,:])
wave = 0.75*(np.sin(2.*lats)**8*np.cos(4.*lons))
mean = 0.5*np.cos(2.*lats)*((np.sin(2.*lats))**2 + 2.)
# compute native map projection coordinates of lat/lon grid.
# (convert lons and lats to degrees first)
x, y = map(lons*180./np.pi, lats*180./np.pi)
# draw map boundary
map.drawmapboundary(color="0.9")
# draw graticule (latitude and longitude grid lines)
map.drawmeridians(np.arange(0,360,30),color="0.9")
map.drawparallels(np.arange(-90,90,30),color="0.9")
# plot filled circles at the locations of the cities.
map.plot(xc,yc,'wo')
# plot the names of five cities.
for name,xpt,ypt in zip(cities,xc,yc):
plt.text(xpt+100000,ypt+100000,name,fontsize=9,color='w')
# contour data over the map.
cs = map.contour(x,y,wave+mean,15,linewidths=1.5)
# draw blue marble image in background.
# (downsample the image by 50% for speed)
map.bluemarble(scale=0.5)
def plotempty():
# create figure
fig = plt.figure(figsize=(8,8))
fig.text(0.5, 0.5, "Sorry, could not import Basemap",
horizontalalignment='center')
if have_basemap:
plotmap()
else:
plotempty()
plt.show()
| gpl-3.0 |
AlexHatesUnicorns/FDTD_Solver | postprocessing/calculate_spot.py | 2 | 5895 | import numpy as np
import matplotlib.pyplot as plt
import argparse
import traceback
import tarfile
import os
import re
import time
def read_tarinfo(fname):
tar = tarfile.open(fname)
return list(tar.getmembers()), tar
def read(file, tar_info):
f = file.extractfile(tar_info)
if f is None:
return None, True
content = f.read()
return content, False
def should_parse_file(tar_file, t_start, t_end):
regex = re.compile(r"i_(\w+)_(\d+)\.txt")
try:
m = regex.search(tar_file.name)
file_type = m.group(1)
t = int(m.group(2))
return t_start <= t <= t_end and file_type == 'small'
except BaseException:
return False
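# Descriptive note on the routine below: the dump is parsed into a dense 2D
# intensity array; for every x-column the smallest symmetric window around the
# column's peak containing more than a fraction th[0] of that column's total
# energy is recorded as the column's spot size.  The column with the smallest
# spot is treated as the focal plane, where the spot is re-measured around the
# column centre using threshold th[1], and the resulting 1D profile is plotted
# and saved with the spot size and energy annotated.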
def output_spot(name, content, dir_name, th, ext):
splited = content.decode("utf-8").split("\n")
# Get area size
first_x = float('inf')
first_y = float('inf')
last_x = 0
last_y = 0
for line in splited:
line_splited = line.split(" ")
if len(line_splited) != 3:
print(line_splited)
continue
y = int(line_splited[1])
x = int(line_splited[0])
if last_x < x:
last_x = x
if first_x > x:
first_x = x
if last_y < y:
last_y = y
if first_y > y:
first_y = y
size_x = last_x - first_x
size_y = last_y - first_y
data = np.zeros((size_y + 1, size_x + 1))
print("File {name} obtain size_x={size_x} size_y={size_y}".format(
name=name,
size_x=size_x,
size_y=size_y
))
# Read to NP array
for line in splited:
line_splited = line.split(" ")
if len(line_splited) != 3:
print(line_splited)
continue
y = int(line_splited[1])
x = int(line_splited[0])
val = float(line_splited[2])
data[y - first_y, x - first_x] = val
# Iterate over each slice
spot_x = 0
spot_y = 0
spot_size = 1000
max_radius = 500
spots = []
spots_energy = []
for column_idx in range(data.shape[1]):
column = data[:, column_idx]
nonzero = np.count_nonzero(column)
if nonzero == 0:
continue
spot_size = len(column)
spot_energy = 0
max_pos = np.argmax(column)
total_energy = np.sum(column)
for radius in range(10, max_radius):
spot = column[max_pos - radius : max_pos + radius]
energy_inside_spot = np.sum(spot)
if energy_inside_spot/total_energy > th[0]:
spot_size = 2 * radius
spot_energy = energy_inside_spot
break
spots.append(spot_size)
spots_energy.append(spot_energy)
min_spot_idx = np.argmin(spots)
max_pos = int(len(data[:, min_spot_idx])/2)
min_spot_column = data[max_pos - max_radius : max_pos + max_radius, min_spot_idx]
actual_spot = 2 * max_radius
actual_spot_energy = 0
total_energy = np.sum(min_spot_column)
for radius in range(10, max_radius):
center = int(len(min_spot_column)/2)
spot = min_spot_column[center - radius : center + radius]
energy_inside_spot = np.sum(spot)
if energy_inside_spot/total_energy > th[1]:
actual_spot = 2 * radius
actual_spot_energy = energy_inside_spot
break
plt.gcf().clear()
plt.plot(range(max_pos - max_radius, max_pos + max_radius), min_spot_column)
plt.text(max_pos - max_radius, 5, "X={x} Spot={spot}\nE={spot_energy}".format(
x=first_x + min_spot_idx,
spot=actual_spot,
spot_energy=round(actual_spot_energy, 2)
), fontsize=12)
plt.savefig(name.replace(".png", "_x={x}_spot={spot}.{ext}".format(
x=first_x + min_spot_idx,
spot=actual_spot,
ext=ext
)))
def main(tar_name, t_start, t_end, output_name, th, ext):
tar_info, file = read_tarinfo(tar_name)
dir_name = "spots_{}".format(tar_name.replace(".tar.gz", ""))
print(tar_info)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
for tf in tar_info:
try:
if not should_parse_file(tf, t_start, t_end):
print("Skipping: {}".format(tf.name))
continue
print("Processing {}".format(tf.name))
content, isErr = read(file, tf)
if isErr:
print("Error during file processing: {}".format(tf.name))
continue
output_spot("spots_" + tf.name.replace(".txt", ".png"), content, dir_name, th, ext)
except BaseException:
traceback.print_exc()
continue
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--out_name", help="Path to file in which image will be saved")
parser.add_argument("--th1", help="Threshold for spot finding algorithm step 1")
parser.add_argument("--th2", help="Threshold for spot finding algorithm step 2")
parser.add_argument("--tar_file", help="Path to archive")
parser.add_argument("--t_start", help="Time from to start read files")
parser.add_argument("--t_end", help="Time until dumps should ba analized")
parser.add_argument("--ext", help="output images extensions typically png or pdf")
args = parser.parse_args()
out_name = str(args.out_name)
tar_file = str(args.tar_file)
t_start = int(args.t_start)
t_end = int(args.t_end)
th1 = float(args.th1)
th2 = float(args.th2)
ext = str(args.ext)
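    # Example invocation (file name and threshold values here are only
    # illustrative placeholders):
    #   python calculate_spot.py --tar_file dumps.tar.gz --t_start 100 \
    #       --t_end 500 --out_name spots --th1 0.8 --th2 0.8 --ext png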
print("Start processing spots: tar_file={tar_file}\n out_name={out_name}\n t_start={t_start}\n t_end={t_end}\n".format(
tar_file=tar_file,
out_name=out_name,
t_start=t_start,
t_end=t_end,
))
main(
tar_name=tar_file,
output_name=out_name,
t_start=t_start,
t_end=t_end,
th=(th1, th2),
ext=ext
)
| mit |
nomadcube/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set and, therefore,
so are the corresponding Mahalanobis distances. It is better to use a
robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) was introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2].
[1] P. J. Rousseeuw. Least median of squares regression. Journal of the
    American Statistical Association, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
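# Illustrative cross-check added to the example (a sketch, assuming the fitted
# estimator exposes ``location_`` and ``covariance_``): the squared Mahalanobis
# distance from the docstring, d^2 = (x - mu)' Sigma^{-1} (x - mu), is
# reproduced directly and compared with the estimator's own mahalanobis()
# output; the two should agree up to numerical error.
diff = X - emp_cov.location_
d2_manual = np.einsum('ij,jk,ik->i', diff,
                      np.linalg.inv(emp_cov.covariance_), diff)
print("max |manual d^2 - emp_cov.mahalanobis(X)|: %g"
      % np.max(np.abs(d2_manual - emp_cov.mahalanobis(X))))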
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
jalexvig/tensorflow | tensorflow/examples/learn/text_classification_cnn.py | 21 | 5221 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
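# Rough shape walk-through of cnn_model below for a batch of size B
# (illustrative only; derived from the constants above):
#   embed_sequence                    -> [B, 100, 20]
#   expand_dims(..., 3)               -> [B, 100, 20, 1]
#   conv2d (VALID, 20x20 kernel)      -> [B, 81, 1, 10]
#   max_pooling2d (SAME, stride 2)    -> [B, 41, 1, 10]
#   transpose([0, 1, 3, 2])           -> [B, 41, 10, 1]
#   conv2d (VALID, 20x10 kernel)      -> [B, 22, 1, 10]
#   reduce_max over axis 1 + squeeze  -> [B, 10]
#   dense                             -> [B, 15] logits (MAX_LABEL classes)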
def cnn_model(features, labels, mode):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
word_vectors,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
        # Add a ReLU for non-linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), axis=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = tf.estimator.Estimator(model_fn=cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Evaluate.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy: {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |