repo_name | path | copies | size | content | license
---|---|---|---|---|---|
piyueh/PetIBM | examples/ibpm/cylinder2dRe3000_GPU/scripts/plotVorticity.py | 6 | 1402 | """
Computes, plots, and saves the 2D vorticity field from a PetIBM simulation
after 3000 time steps (3 non-dimensional time-units).
"""
import pathlib
import h5py
import numpy
from matplotlib import pyplot
simu_dir = pathlib.Path(__file__).absolute().parents[1]
data_dir = simu_dir / 'output'
# Read vorticity field and its grid from files.
name = 'wz'
filepath = data_dir / 'grid.h5'
f = h5py.File(filepath, 'r')
x, y = f[name]['x'][:], f[name]['y'][:]
X, Y = numpy.meshgrid(x, y)
timestep = 3000
filepath = data_dir / '{:0>7}.h5'.format(timestep)
f = h5py.File(filepath, 'r')
wz = f[name][:]
# Read body coordinates from file.
filepath = simu_dir / 'circle.body'
with open(filepath, 'r') as infile:
xb, yb = numpy.loadtxt(infile, dtype=numpy.float64,
unpack=True, skiprows=1)
pyplot.rc('font', family='serif', size=16)
# Plot contour lines of the vorticity field.
fig, ax = pyplot.subplots(figsize=(6.0, 6.0))
ax.grid()
ax.set_xlabel('x')
ax.set_ylabel('y')
levels = numpy.linspace(-56.0, 56.0, 28)
ax.contour(X, Y, wz, levels=levels, colors='black')
ax.plot(xb, yb, color='red')
ax.set_xlim(-0.6, 1.6)
ax.set_ylim(-0.8, 0.8)
ax.set_aspect('equal')
fig.tight_layout()
pyplot.show()
# Save figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'wz{:0>7}.png'.format(timestep)
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
trustedanalytics/spark-tk | regression-tests/sparktkregtests/testcases/frames/cumulative_tally_test.py | 13 | 5020 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test cumulative tally functions, hand calculated baselines"""
import unittest
from sparktkregtests.lib import sparktk_test
class TestCumulativeTally(sparktk_test.SparkTKTestCase):
def setUp(self):
super(TestCumulativeTally, self).setUp()
data_tally = self.get_file("cumu_tally_seq.csv")
schema_tally = [("sequence", int),
("user_id", int),
("vertex_type", str),
("movie_id", int),
("rating", int),
("splits", str),
("count", int),
("percent_count", float)]
self.tally_frame = self.context.frame.import_csv(data_tally,
schema=schema_tally)
def test_tally_and_tally_percent(self):
"""Test tally and tally percent"""
self.tally_frame.tally("rating", '5')
self.tally_frame.tally_percent("rating", '5')
pd_frame = self.tally_frame.to_pandas(self.tally_frame.count())
for index, row in pd_frame.iterrows():
self.assertAlmostEqual(
row['percent_count'], row['rating_tally_percent'], delta=.0001)
self.assertEqual(row['count'], row['rating_tally'])
def test_tally_colname_collision(self):
"""Test tally column names collide gracefully"""
# repeatedly run tally to force collisions
self.tally_frame.tally("rating", '5')
self.tally_frame.tally_percent("rating", '5')
self.tally_frame.tally("rating", '5')
self.tally_frame.tally_percent("rating", '5')
self.tally_frame.tally("rating", '5')
self.tally_frame.tally_percent("rating", '5')
columns = [u'sequence',
u'user_id',
u'vertex_type',
u'movie_id',
u'rating',
u'splits',
u'count',
u'percent_count',
u'rating_tally',
u'rating_tally_percent',
u'rating_tally_0',
u'rating_tally_percent_0',
u'rating_tally_1',
u'rating_tally_percent_1']
self.assertItemsEqual(self.tally_frame.column_names, columns)
def test_tally_no_column(self):
"""Test errors on non-existant column"""
with self.assertRaisesRegexp(Exception, "Invalid column name"):
self.tally_frame.tally("no_such_column", '5')
def test_tally_no_column_percent(self):
with self.assertRaisesRegexp(Exception, "Invalid column name"):
self.tally_frame.tally_percent("no_such_column", '5')
def test_tally_none(self):
"""Test tally none column errors"""
with self.assertRaisesRegexp(Exception,
"column name for sample is required"):
self.tally_frame.tally(None, '5')
def test_tally_none_percent(self):
with self.assertRaisesRegexp(Exception,
"column name for sample is required"):
self.tally_frame.tally_percent(None, '5')
def test_tally_bad_type(self):
"""Test tally on incorrect type errors"""
with self.assertRaisesRegexp(Exception, "does not exist"):
self.tally_frame.tally("rating", 5)
def test_tally_bad_type_percent(self):
with self.assertRaisesRegexp(Exception, "does not exist"):
self.tally_frame.tally_percent("rating", 5)
def test_tally_value_none(self):
"""Test tally on none errors"""
with self.assertRaisesRegexp(Exception,
"count value for the sample is required"):
self.tally_frame.tally("rating", None)
def test_tally_value_none_percent(self):
with self.assertRaisesRegexp(Exception,
"count value for the sample is required"):
self.tally_frame.tally_percent("rating", None)
def test_tally_no_element(self):
"""Test tallying on non-present element is correct"""
self.tally_frame.tally_percent("rating", "12")
local_frame = self.tally_frame.to_pandas(self.tally_frame.count())
for index, row in local_frame.iterrows():
self.assertEqual(row["rating_tally_percent"], 1.0)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
tawsifkhan/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
crisis-economics/housingModel | src/main/resources/calibration/code/bak/temp.py | 4 | 3241 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
import matplotlib.pyplot as plt
######################################################################
class QuarterlyTable(pd.Series):
'Representation of a column of numbers at quarterly time intervals'
offset = 0
# offset - row number of first quarter of 2000
# columns - column containing data
def __init__(self, offset, dataSeries):
pd.Series.__init__(self,data=dataSeries)
self.offset = offset
def val(self, month, year):
return(self.iloc[self.getRow(month, year)])
def annualGrowth(self, month, year, lag):
row = self.getRow(month, year)
lag = lag/3
return( (self.iloc[row]/
self.iloc[row-lag] - 1.0)*4.0/lag)
def annualGrowthData(self, lag):
lag = lag/3
return(QuarterlyTable(self.offset-lag,(self[lag:].values/self[:self.size-lag].values)*4.0/lag))
def getRow(self, month, year):
row = (month-1)/3 + 4*(year-2000) + self.offset
if(row<0): row = 0
return(row)
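# Worked example of the offset bookkeeping (illustrative only): with
# offset = 102, the first quarter of 2000 sits at row 102, so
# getRow(1, 2005) returns (1-1)/3 + 4*(2005-2000) + 102 = 0 + 20 + 102 = 122,
# i.e. Q1 2005 lies twenty quarters after Q1 2000.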
######################################################################
class Nationwide(QuarterlyTable):
'Nationwide House Price Appreciation table (Non-seasonally adjusted)'
def __init__(self):
QuarterlyTable.__init__(self, 102, pd.read_excel("/home/daniel/data/datasets/HPI/NationwideHPI.xls").iloc[5:,28])
######################################################################
class NationwideSeasonal(QuarterlyTable):
'Nationwide House Price Appreciation table (Seasonally adjusted)'
def __init__(self):
QuarterlyTable.__init__(self, 102, pd.read_excel("/home/daniel/data/datasets/HPI/NationwideHPISeasonal.xls").iloc[5:,13])
######################################################################
class HalifaxSeasonal(QuarterlyTable):
'Halifax seasonal House Price Appreciation (seasonally adjusted)'
def __init__(self):
QuarterlyTable.__init__(self, 68, pd.read_excel("/home/daniel/data/datasets/HPI/HalifaxHPI.xls", sheetname="All (SA) Quarters").iloc[5:,25])
######################################################################
class HPISeasonal():
nationwide = pd.Series()
halifax = pd.Series()
def __init__(self):
self.nationwide = NationwideSeasonal()
self.halifax = HalifaxSeasonal()
def HPI(self):
offset = self.nationwide.offset - self.halifax.offset
size = self.halifax.size
return(QuarterlyTable(self.halifax.offset,(self.nationwide[offset:offset+size] + self.halifax[:])/2.0))
def HPA(self):
hpa1 = self.nationwide.annualGrowthData(12)
hpa2 = self.halifax.annualGrowthData(12)
offset = hpa1.offset - hpa2.offset
size = hpa2.size
return(QuarterlyTable(hpa2.offset,(hpa1[offset:offset+size].values + hpa2[:size-1].values)/2.0))
hpi = HalifaxSeasonal()
hpa = hpi.annualGrowthData(12)
row = hpa.getRow(1,1990)
hpi2 = NationwideSeasonal()
hpa2 = hpi2.annualGrowthData(12)
row2 = hpa2.getRow(1,1990)
plt.plot(hpa[row:])
plt.plot(hpa2[row2:])
hpi3 = HPISeasonal()
hpa3 = hpi3.HPA()
row3 = hpa3.getRow(1,1990)
plt.plot(hpa3[row3:])
| mit |
maropu/spark | python/pyspark/pandas/missing/common.py | 16 | 2092 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
memory_usage = lambda f: f(
"memory_usage",
reason="Unlike pandas, most DataFrames are not materialized in memory in Spark "
"(and pandas-on-Spark), and as a result memory_usage() does not do what you intend it "
"to do. Use Spark's web UI to monitor disk and memory usage of your application.",
)
array = lambda f: f(
"array", reason="If you want to collect your data as an NumPy array, use 'to_numpy()' instead."
)
to_pickle = lambda f: f(
"to_pickle",
reason="For storage, we encourage you to use Delta or Parquet, instead of Python pickle "
"format.",
)
to_xarray = lambda f: f(
"to_xarray",
reason="If you want to collect your data as an NumPy array, use 'to_numpy()' instead.",
)
to_list = lambda f: f(
"to_list",
reason="If you want to collect your data as an NumPy array, use 'to_numpy()' instead.",
)
tolist = lambda f: f(
"tolist", reason="If you want to collect your data as an NumPy array, use 'to_numpy()' instead."
)
__iter__ = lambda f: f(
"__iter__",
reason="If you want to collect your data as an NumPy array, use 'to_numpy()' instead.",
)
duplicated = lambda f: f(
"duplicated",
reason="'duplicated' API returns np.ndarray and the data size is too large."
"You can just use DataFrame.deduplicated instead",
)
| apache-2.0 |
wlamond/scikit-learn | sklearn/utils/extmath.py | 1 | 26768 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse, csr_matrix
from scipy.misc import logsumexp as scipy_logsumexp
from . import check_random_state, deprecated
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
from ..exceptions import NonBLASDotWarning
@deprecated("sklearn.utils.extmath.norm was deprecated in version 0.19"
"and will be removed in 0.21. Use scipy.linalg.norm instead.")
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
return linalg.norm(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
if np.issubdtype(x.dtype, np.integer):
warnings.warn('Array type is integer, np.dot may overflow. '
'Data should be float type to avoid this issue',
UserWarning)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
if not isinstance(X, csr_matrix):
X = csr_matrix(X)
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Falling back to np.dot. '
'Data must be of same type of either '
'32 or 64 bit float for the BLAS function, gemm, to be '
'used for an efficient dot operation. ',
NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.exceptions import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
def randomized_range_finder(A, size, n_iter,
power_iteration_normalizer='auto',
random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A : 2D array
The input data matrix
size : integer
Size of the return array
n_iter : integer
Number of power iterations used to stabilize the result
power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
Returns
-------
Q : 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
# Generating normal random vectors with shape: (A.shape[1], size)
Q = random_state.normal(size=(A.shape[1], size))
# Deal with "auto" mode
if power_iteration_normalizer == 'auto':
if n_iter <= 2:
power_iteration_normalizer = 'none'
else:
power_iteration_normalizer = 'LU'
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of A in Q
for i in range(n_iter):
if power_iteration_normalizer == 'none':
Q = safe_sparse_dot(A, Q)
Q = safe_sparse_dot(A.T, Q)
elif power_iteration_normalizer == 'LU':
Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
elif power_iteration_normalizer == 'QR':
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')
# Sample the range of A using by linear projection of Q
# Extract an orthonormal basis
Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter='auto',
power_iteration_normalizer='auto', transpose='auto',
flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M : ndarray or sparse matrix
Matrix to decompose
n_components : int
Number of singular values and vectors to extract.
n_oversamples : int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values.
n_iter : int or 'auto' (default is 'auto')
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
        (< .1 * min(X.shape)), in which case `n_iter` is set to 7.
This improves precision with few components.
.. versionchanged:: 0.18
power_iteration_normalizer : 'auto' (default), 'QR', 'LU', 'none'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter`<=2 and switches to LU otherwise.
.. versionadded:: 0.18
transpose : True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
case.
.. versionchanged:: 0.18
flip_sign : boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision).
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if n_iter == 'auto':
        # Checks if the number of iterations is explicitly specified
# Adjust n_iter. 7 was found a good compromise for PCA. See #5299
n_iter = 7 if n_components < .1 * min(M.shape) else 4
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter,
power_iteration_normalizer, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
if not transpose:
U, V = svd_flip(U, V)
else:
# In case of transpose u_based_decision=false
# to actually flip based on u and not v.
U, V = svd_flip(U, V, u_based_decision=False)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
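# Illustrative sketch only (not part of scikit-learn's API): sanity-check
# randomized_svd against the exact singular values of a low-rank matrix.
def _randomized_svd_demo():
    rng = np.random.RandomState(42)
    # an exactly rank-20 matrix of shape (1000, 300)
    M = np.dot(rng.randn(1000, 20), rng.randn(20, 300))
    U, s, V = randomized_svd(M, n_components=5, n_iter=7, random_state=42)
    s_exact = linalg.svd(M, compute_uv=False)[:5]
    # the leading singular values should closely match
    return s, s_exact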
@deprecated("sklearn.utils.extmath.logsumexp was deprecated in version 0.19"
"and will be removed in 0.21. Use scipy.misc.logsumexp instead.")
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
return scipy_logsumexp(arr, axis)
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
@deprecated("sklearn.utils.extmath.pinvh was deprecated in version 0.19"
"and will be removed in 0.21. Use scipy.linalg.pinvh instead.")
def pinvh(a, cond=None, rcond=None, lower=True):
return linalg.pinvh(a, cond, rcond, lower)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping.
Otherwise, use the rows of v. The choice of which variable to base the
decision on is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
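# Illustrative sketch only (not part of the module): flipping signs changes the
# individual singular vectors but leaves the reconstruction unchanged.
def _svd_flip_demo():
    a = np.random.RandomState(0).randn(6, 4)
    u, s, v = linalg.svd(a, full_matrices=False)
    u2, v2 = svd_flip(u.copy(), v.copy())
    # both products reconstruct the same matrix `a`, so this returns True
    return np.allclose(np.dot(u * s, v), np.dot(u2 * s, v2))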
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``scipy.special.expit``.
Parameters
----------
X : array-like, shape (M, N) or (M, )
Argument to the logistic function
out : array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out : array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
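# Illustrative sketch only: the naive formula overflows for large negative
# inputs, while the split implementation above stays finite.
def _log_logistic_demo():
    x = np.array([-1000., 0., 1000.])
    naive = np.log(1. / (1. + np.exp(-x)))  # [-inf, -0.693..., 0.] plus overflow warnings
    stable = log_logistic(x)                # [-1000., -0.693..., ~0.]
    return naive, stable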
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X : array-like, shape (M, N)
Argument to the logistic function
copy : bool, optional
Copy X or not.
Returns
-------
out : array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
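# Illustrative sketch only: subtracting the row maximum keeps the exponentials
# finite, so even very large inputs yield a proper probability row.
def _softmax_demo():
    X = np.array([[1000., 1001., 1002.]])
    # approx. [[0.090, 0.245, 0.665]], rows summing to 1, with no overflow
    return softmax(X)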
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
last_sample_count=0):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
function. Both must be initialized to 0.0. In case no scaling is required
    last_variance can be None. The mean is always required and returned because
    it is necessary for the calculation of the variance. last_sample_count is the
number of samples encountered until now.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
variance: recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = X.sum(axis=0)
new_sample_count = X.shape[0]
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = X.var(axis=0) * new_sample_count
if last_sample_count == 0: # Avoid division by 0
updated_unnormalized_variance = new_unnormalized_variance
else:
last_over_new_count = last_sample_count / new_sample_count
last_unnormalized_variance = last_variance * last_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance +
new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count
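# Illustrative sketch only: two incremental updates should reproduce the
# statistics of the concatenated batches.
def _incremental_mean_and_var_demo():
    rng = np.random.RandomState(0)
    X1, X2 = rng.randn(50, 3), rng.randn(70, 3)
    mean1, var1, n1 = _incremental_mean_and_var(X1, last_mean=0.,
                                                last_variance=0.,
                                                last_sample_count=0)
    mean2, var2, n2 = _incremental_mean_and_var(X2, mean1, var1, n1)
    X = np.vstack([X1, X2])
    # both comparisons should be True
    return np.allclose(mean2, X.mean(axis=0)), np.allclose(var2, X.var(axis=0))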
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
"""Use high precision for cumsum and check that final value matches sum
Parameters
----------
arr : array-like
To be cumulatively summed as flat
axis : int, optional
Axis along which the cumulative sum is computed.
The default (None) is to compute the cumsum over the flattened array.
rtol : float
Relative tolerance, see ``np.allclose``
atol : float
Absolute tolerance, see ``np.allclose``
"""
# sum is as unstable as cumsum for numpy < 1.9
if np_version < (1, 9):
return np.cumsum(arr, axis=axis, dtype=np.float64)
out = np.cumsum(arr, axis=axis, dtype=np.float64)
expected = np.sum(arr, axis=axis, dtype=np.float64)
if not np.all(np.isclose(out.take(-1, axis=axis), expected, rtol=rtol,
atol=atol, equal_nan=True)):
warnings.warn('cumsum was found to be unstable: '
'its last element does not correspond to sum',
RuntimeWarning)
return out
| bsd-3-clause |
durgeshsamariya/durgeshsamariya.github.io | markdown_generator/talks.py | 199 | 4000 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
    md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
| mit |
Succeed-Together/bakfu | classify/base.py | 2 | 1167 | """Base classes for classifiers"""
from ..core.classes import Processor
class BaseClassifier(Processor):
'''
The base class for classifiers.
'''
def __init__(self, *args, **kwargs):
super(BaseClassifier, self).__init__(*args, **kwargs)
self.classifier = None
class SklearnClassifier(BaseClassifier):
'''
A class wrapping sklearn classifiers.
'''
#The sklearn classifier
classifier_class = None
def __init__(self, *args, **kwargs):
super(BaseClassifier, self).__init__(*args, **kwargs)
self.init_classifier(*args, **kwargs)
def init_classifier(self, *args, **kwargs):
'''
Init sklearn classifier.
'''
self.classifier = self.classifier_class(*args, **kwargs)
def run_classifier(self, caller, *args, **kwargs):
pass
def run(self, caller, *args, **kwargs):
return self.run_classifier(caller, *args, **kwargs)
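    # Hedged illustration (hypothetical subclass, not part of bakfu): a
    # concrete wrapper would presumably point ``classifier_class`` at an
    # sklearn estimator, for example:
    #
    #   from sklearn.naive_bayes import MultinomialNB
    #
    #   class NaiveBayesClassifier(SklearnClassifier):
    #       classifier_class = MultinomialNB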
    def __getattr__(self, attr):
        '''Propagate attribute search to the wrapped classifier.'''
        # Looking the attribute up on ``self`` here would recurse forever,
        # so delegate straight to the underlying sklearn classifier.
        return getattr(self.classifier, attr) | bsd-3-clause |
ctherien/pysptools | pysptools/classification/docstring.py | 1 | 7376 | #
#------------------------------------------------------------------------------
# Copyright (c) 2013-2014, Christian Therien
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# docstring.py - This file is part of the PySptools package.
#
classify_docstring = """
Classify the HSI cube M with the spectral library E.
Parameters:
M: `numpy array`
A HSI cube (m x n x p).
E: `numpy array`
A spectral library (N x p).
threshold: `float [default 0.1] or list`
* If float, threshold is applied on all the spectra.
* If a list, individual threshold is applied on each
spectrum, in this case the list must have the same
number of threshold values than the number of spectra.
* Threshold have values between 0.0 and 1.0.
mask: `numpy array [default None]`
A binary mask, when *True* the selected pixel is classified.
Returns: `numpy array`
A class map (m x n x 1).
"""
get_single_map_docstring = """
Get individual classified map. See plot_single_map for
a description.
Parameters:
lib_idx: `int or string`
A number between 1 and the number of spectra in the library.
constrained: `boolean [default True]`
See plot_single_map for a description.
Returns: `numpy array`
The individual map (m x n x 1) associated to the lib_idx endmember.
"""
plot_single_map_docstring = """
Plot individual classified map. One for each spectrum.
Note that each individual map is constrained by the others.
    This function is useful to see the individual maps that compose
    the final class map returned by the classify method. It helps
    to define the spectral library. See the constrained parameter below.
Parameters:
path: `string`
The path where to put the plot.
lib_idx: `int or string`
* A number between 1 and the number of spectra in the library.
* 'all', plot all the individual maps.
constrained: `boolean [default True]`
* If constrained is True, print the individual maps as they compose the
final class map. Any potential intersection is removed in favor of
the lower value level for SAM and SID, or the nearest to 1 for NormXCorr. Use
this one to understand the final class map.
* If constrained is False, print the individual maps without intersection
removed, as they are generated. Use this one to have the real match.
stretch: `boolean [default False]`
Stretch the map between 0 and 1 giving a good distribution of the
color map.
colorMap: `string [default 'spectral']`
A matplotlib color map.
suffix: `string [default None]`
Add a suffix to the file name.
"""
display_single_map_docstring = """
Display individual classified map to a IPython Notebook. One for each spectrum.
Note that each individual map is constrained by the others.
This function is usefull to see the individual map that compose
the final class map returned by the classify method. It help
to define the spectra library. See the constrained parameter below.
Parameters:
lib_idx: `int or string`
* A number between 1 and the number of spectra in the library.
* 'all', plot all the individual maps.
constrained: `boolean [default True]`
* If constrained is True, print the individual maps as they compose the
final class map. Any potential intersection is removed in favor of
the lower value level for SAM and SID, or the nearest to 1 for NormXCorr. Use
this one to understand the final class map.
* If constrained is False, print the individual maps without intersection
removed, as they are generated. Use this one to have the real match.
stretch: `boolean [default False]`
Stretch the map between 0 and 1 giving a good distribution of the
color map.
colorMap: `string [default 'spectral']`
A matplotlib color map.
suffix: `string [default None]`
Add a suffix to the title.
"""
plot_docstring = """
Plot the class map.
Parameters:
path: `string`
The path where to put the plot.
labels: `list of string [default None]`
The legend labels. Can be used only if the input spectral library
        E has more than 1 pixel.
mask: `numpy array [default None]`
A binary mask, when *True* the corresponding pixel is displayed.
interpolation: `string [default none]`
A matplotlib interpolation method.
colorMap: `string [default 'Accent']`
A color map element of
['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3'],
"Accent" is the default and it fall back on "Jet".
suffix: `string [default None]`
Add a suffix to the file name.
"""
display_docstring = """
Display the class map to a IPython Notebook.
Parameters:
labels: `list of string [default None]`
The legend labels. Can be used only if the input spectral library
        E has more than 1 pixel.
mask: `numpy array [default None]`
A binary mask, when *True* the corresponding pixel is displayed.
interpolation: `string [default none]`
A matplotlib interpolation method.
colorMap: `string [default 'Accent']`
A color map element of
['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3'],
"Accent" is the default and it fall back on "Jet".
suffix: `string [default None]`
Add a suffix to the title.
"""
plot_histo_docstring = """
Plot the histogram.
Parameters:
path: `string`
The path where to put the plot.
suffix: `string [default None]`
Add a suffix to the file name.
"""
| apache-2.0 |
aabadie/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
henryiii/rootpy | rootpy/plotting/root2matplotlib.py | 2 | 28765 | # Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
"""
This module provides functions that allow the plotting of ROOT histograms and
graphs with `matplotlib <http://matplotlib.org/>`_.
If you just want to save image files and don't want matplotlib to attempt to
create a graphical window, tell matplotlib to use a non-interactive backend
such as ``Agg`` when importing it for the first time (i.e. before importing
rootpy.plotting.root2matplotlib)::
import matplotlib
matplotlib.use('Agg') # do this before importing pyplot or root2matplotlib
This puts matplotlib in a batch state similar to ``ROOT.gROOT.SetBatch(True)``.
"""
from __future__ import absolute_import
# trigger ROOT's finalSetup (GUI thread) before matplotlib's
import ROOT
ROOT.kTRUE
from math import sqrt
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
import matplotlib.pyplot as plt
import numpy as np
from ..extern.six.moves import range
from .hist import _Hist
from .graph import _Graph1DBase
from .utils import get_limits
__all__ = [
'hist',
'bar',
'errorbar',
'fill_between',
'step',
'hist2d',
'imshow',
'contour',
]
def _set_defaults(obj, kwargs, types=['common']):
defaults = {}
for key in types:
if key == 'common':
defaults['label'] = obj.GetTitle()
defaults['visible'] = getattr(obj, 'visible', True)
defaults['alpha'] = getattr(obj, 'alpha', None)
elif key == 'line':
defaults['linestyle'] = obj.GetLineStyle('mpl')
defaults['linewidth'] = obj.GetLineWidth()
elif key == 'fill':
defaults['edgecolor'] = kwargs.get('color', obj.GetLineColor('mpl'))
defaults['facecolor'] = kwargs.get('color', obj.GetFillColor('mpl'))
root_fillstyle = obj.GetFillStyle('root')
if root_fillstyle == 0:
if not kwargs.get('fill'):
defaults['facecolor'] = 'none'
defaults['fill'] = False
elif root_fillstyle == 1001:
defaults['fill'] = True
else:
defaults['hatch'] = obj.GetFillStyle('mpl')
defaults['facecolor'] = 'none'
elif key == 'marker':
defaults['marker'] = obj.GetMarkerStyle('mpl')
defaults['markersize'] = obj.GetMarkerSize() * 5
defaults['markeredgecolor'] = obj.GetMarkerColor('mpl')
defaults['markerfacecolor'] = obj.GetMarkerColor('mpl')
elif key == 'errors':
defaults['ecolor'] = obj.GetLineColor('mpl')
elif key == 'errorbar':
defaults['fmt'] = obj.GetMarkerStyle('mpl')
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
def _set_bounds(h,
axes=None,
was_empty=True,
prev_xlim=None,
prev_ylim=None,
xpadding=0,
ypadding=.1,
xerror_in_padding=True,
yerror_in_padding=True,
snap=True,
logx=None,
logy=None):
if axes is None:
axes = plt.gca()
if prev_xlim is None:
prev_xlim = plt.xlim()
if prev_ylim is None:
prev_ylim = plt.ylim()
if logx is None:
logx = axes.get_xscale() == 'log'
if logy is None:
logy = axes.get_yscale() == 'log'
xmin, xmax, ymin, ymax = get_limits(
h,
xpadding=xpadding,
ypadding=ypadding,
xerror_in_padding=xerror_in_padding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logx=logx,
logy=logy)
if was_empty:
axes.set_xlim([xmin, xmax])
axes.set_ylim([ymin, ymax])
else:
prev_xmin, prev_xmax = prev_xlim
if logx and prev_xmin <= 0:
axes.set_xlim([xmin, max(prev_xmax, xmax)])
else:
axes.set_xlim([min(prev_xmin, xmin), max(prev_xmax, xmax)])
prev_ymin, prev_ymax = prev_ylim
if logy and prev_ymin <= 0:
axes.set_ylim([ymin, max(prev_ymax, ymax)])
else:
axes.set_ylim([min(prev_ymin, ymin), max(prev_ymax, ymax)])
def _get_highest_zorder(axes):
return max([c.get_zorder() for c in axes.get_children()])
def _maybe_reversed(x, reverse=False):
if reverse:
return reversed(x)
return x
def hist(hists,
stacked=True,
reverse=False,
xpadding=0, ypadding=.1,
yerror_in_padding=True,
logy=None,
snap=True,
axes=None,
**kwargs):
"""
Make a matplotlib hist plot from a ROOT histogram, stack or
list of histograms.
Parameters
----------
hists : Hist, list of Hist, HistStack
The histogram(s) to be plotted
stacked : bool, optional (default=True)
If True then stack the histograms with the first histogram on the
bottom, otherwise overlay them with the first histogram in the
background.
reverse : bool, optional (default=False)
If True then reverse the order of the stack or overlay.
xpadding : float or 2-tuple of floats, optional (default=0)
Padding to add on the left and right sides of the plot as a fraction of
the axes width after the padding has been added. Specify unique left
and right padding with a 2-tuple.
ypadding : float or 2-tuple of floats, optional (default=.1)
Padding to add on the top and bottom of the plot as a fraction of
the axes height after the padding has been added. Specify unique top
and bottom padding with a 2-tuple.
yerror_in_padding : bool, optional (default=True)
If True then make the padding inclusive of the y errors otherwise
only pad around the y values.
logy : bool, optional (default=None)
Apply special treatment of a log-scale y-axis to display the histogram
correctly. If None (the default) then automatically determine if the
y-axis is log-scale.
snap : bool, optional (default=True)
If True (the default) then the origin is an implicit lower bound of the
histogram unless the histogram has both positive and negative bins.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
kwargs : additional keyword arguments, optional
All additional keyword arguments are passed to matplotlib's
fill_between for the filled regions and matplotlib's step function
for the edges.
Returns
-------
The return value from matplotlib's hist function, or list of such return
values if a stack or list of histograms was plotted.
"""
if axes is None:
axes = plt.gca()
if logy is None:
logy = axes.get_yscale() == 'log'
curr_xlim = axes.get_xlim()
curr_ylim = axes.get_ylim()
was_empty = not axes.has_data()
returns = []
if isinstance(hists, _Hist):
# This is a single plottable object.
returns = _hist(hists, axes=axes, logy=logy, **kwargs)
_set_bounds(hists, axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
elif stacked:
# draw the top histogram first so its edges don't cover the histograms
# beneath it in the stack
if not reverse:
hists = list(hists)[::-1]
for i, h in enumerate(hists):
kwargs_local = kwargs.copy()
if i == len(hists) - 1:
low = h.Clone()
low.Reset()
else:
low = sum(hists[i + 1:])
high = h + low
high.alpha = getattr(h, 'alpha', None)
proxy = _hist(high, bottom=low, axes=axes, logy=logy, **kwargs)
returns.append(proxy)
if not reverse:
returns = returns[::-1]
_set_bounds(sum(hists), axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
else:
for h in _maybe_reversed(hists, reverse):
returns.append(_hist(h, axes=axes, logy=logy, **kwargs))
if reverse:
returns = returns[::-1]
_set_bounds(hists[max(range(len(hists)), key=lambda idx: hists[idx].max())],
axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
return returns
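# Illustrative usage sketch (assuming rootpy's Hist class; shown here only to
# make the call signature concrete):
#
#   from rootpy.plotting import Hist
#   import rootpy.plotting.root2matplotlib as rplt
#   h1, h2 = Hist(20, -3, 3), Hist(20, -3, 3)
#   h1.FillRandom('gaus')
#   h2.FillRandom('gaus')
#   rplt.hist([h1, h2], stacked=True)
#   plt.show()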
def _hist(h, axes=None, bottom=None, logy=None, zorder=None, **kwargs):
if axes is None:
axes = plt.gca()
if zorder is None:
zorder = _get_highest_zorder(axes) + 1
_set_defaults(h, kwargs, ['common', 'line', 'fill'])
kwargs_proxy = kwargs.copy()
fill = kwargs.pop('fill', False) or ('hatch' in kwargs)
if fill:
# draw the fill without the edge
if bottom is None:
bottom = h.Clone()
bottom.Reset()
fill_between(bottom, h, axes=axes, logy=logy, linewidth=0,
facecolor=kwargs['facecolor'],
edgecolor=kwargs['edgecolor'],
hatch=kwargs.get('hatch', None),
alpha=kwargs['alpha'],
zorder=zorder)
# draw the edge
s = step(h, axes=axes, logy=logy, label=None,
zorder=zorder + 1, alpha=kwargs['alpha'],
color=kwargs.get('color'))
# draw the legend proxy
if getattr(h, 'legendstyle', '').upper() == 'F':
proxy = plt.Rectangle((0, 0), 0, 0, **kwargs_proxy)
axes.add_patch(proxy)
else:
# be sure the linewidth is greater than zero...
proxy = plt.Line2D((0, 0), (0, 0),
linestyle=kwargs_proxy['linestyle'],
linewidth=kwargs_proxy['linewidth'],
color=kwargs_proxy['edgecolor'],
alpha=kwargs['alpha'],
label=kwargs_proxy['label'])
axes.add_line(proxy)
return proxy, s[0]
def bar(hists,
stacked=True,
reverse=False,
xerr=False, yerr=True,
xpadding=0, ypadding=.1,
yerror_in_padding=True,
rwidth=0.8,
snap=True,
axes=None,
**kwargs):
"""
Make a matplotlib bar plot from a ROOT histogram, stack or
list of histograms.
Parameters
----------
hists : Hist, list of Hist, HistStack
The histogram(s) to be plotted
stacked : bool or string, optional (default=True)
If True then stack the histograms with the first histogram on the
bottom, otherwise overlay them with the first histogram in the
background. If 'cluster', then the bars will be arranged side-by-side.
reverse : bool, optional (default=False)
If True then reverse the order of the stack or overlay.
xerr : bool, optional (default=False)
If True, x error bars will be displayed.
yerr : bool or string, optional (default=True)
If False, no y errors are displayed. If True, an individual y
error will be displayed for each hist in the stack. If 'linear' or
'quadratic', a single error bar will be displayed with either the
linear or quadratic sum of the individual errors.
xpadding : float or 2-tuple of floats, optional (default=0)
Padding to add on the left and right sides of the plot as a fraction of
the axes width after the padding has been added. Specify unique left
and right padding with a 2-tuple.
ypadding : float or 2-tuple of floats, optional (default=.1)
Padding to add on the top and bottom of the plot as a fraction of
the axes height after the padding has been added. Specify unique top
and bottom padding with a 2-tuple.
yerror_in_padding : bool, optional (default=True)
If True then make the padding inclusive of the y errors otherwise
only pad around the y values.
rwidth : float, optional (default=0.8)
The relative width of the bars as a fraction of the bin width.
snap : bool, optional (default=True)
If True (the default) then the origin is an implicit lower bound of the
histogram unless the histogram has both positive and negative bins.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
kwargs : additional keyword arguments, optional
All additional keyword arguments are passed to matplotlib's bar
function.
Returns
-------
The return value from matplotlib's bar function, or list of such return
values if a stack or list of histograms was plotted.
"""
if axes is None:
axes = plt.gca()
curr_xlim = axes.get_xlim()
curr_ylim = axes.get_ylim()
was_empty = not axes.has_data()
logy = kwargs.pop('log', axes.get_yscale() == 'log')
kwargs['log'] = logy
returns = []
if isinstance(hists, _Hist):
# This is a single histogram.
returns = _bar(hists, xerr=xerr, yerr=yerr,
axes=axes, **kwargs)
_set_bounds(hists, axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
elif stacked == 'cluster':
nhists = len(hists)
hlist = _maybe_reversed(hists, reverse)
for i, h in enumerate(hlist):
width = rwidth / nhists
offset = (1 - rwidth) / 2 + i * width
returns.append(_bar(
h, offset, width,
xerr=xerr, yerr=yerr, axes=axes, **kwargs))
_set_bounds(sum(hists), axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
elif stacked is True:
nhists = len(hists)
hlist = _maybe_reversed(hists, reverse)
toterr = bottom = None
if yerr == 'linear':
toterr = [sum([h.GetBinError(i) for h in hists])
for i in range(1, hists[0].nbins(0) + 1)]
elif yerr == 'quadratic':
toterr = [sqrt(sum([h.GetBinError(i) ** 2 for h in hists]))
for i in range(1, hists[0].nbins(0) + 1)]
for i, h in enumerate(hlist):
err = None
if yerr is True:
err = True
elif yerr and i == (nhists - 1):
err = toterr
returns.append(_bar(
h,
xerr=xerr, yerr=err,
bottom=list(bottom.y()) if bottom else None,
axes=axes, **kwargs))
if bottom is None:
bottom = h.Clone()
else:
bottom += h
_set_bounds(bottom, axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
else:
hlist = _maybe_reversed(hists, reverse)
for h in hlist:
returns.append(_bar(h, xerr=xerr, yerr=yerr,
axes=axes, **kwargs))
_set_bounds(hists[max(range(len(hists)), key=lambda idx: hists[idx].max())],
axes=axes,
was_empty=was_empty,
prev_xlim=curr_xlim,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
yerror_in_padding=yerror_in_padding,
snap=snap,
logy=logy)
return returns
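# A minimal usage sketch (not part of the original module; `h1` and `h2` are
# assumed to be filled rootpy Hists with identical binning):
#
#     bar([h1, h2], stacked=True, yerr='quadratic')   # stacked, errors summed in quadrature
#     bar([h1, h2], stacked='cluster', rwidth=0.8)    # side-by-side bars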
def _bar(h, roffset=0., rwidth=1., xerr=None, yerr=None, axes=None, **kwargs):
if axes is None:
axes = plt.gca()
if xerr:
xerr = np.array([list(h.xerrl()), list(h.xerrh())])
if yerr:
yerr = np.array([list(h.yerrl()), list(h.yerrh())])
_set_defaults(h, kwargs, ['common', 'line', 'fill', 'errors'])
width = [x * rwidth for x in h.xwidth()]
left = [h.xedgesl(i) + h.xwidth(i) * roffset
for i in range(1, h.nbins(0) + 1)]
height = list(h.y())
return axes.bar(left, height, width=width, xerr=xerr, yerr=yerr, **kwargs)
def errorbar(hists,
xerr=True, yerr=True,
xpadding=0, ypadding=.1,
xerror_in_padding=True,
yerror_in_padding=True,
emptybins=True,
snap=True,
axes=None,
**kwargs):
"""
Make a matplotlib errorbar plot from a ROOT histogram or graph
or list of histograms and graphs.
Parameters
----------
hists : Hist, Graph or list of Hist and Graph
The histogram(s) and/or Graph(s) to be plotted
xerr : bool, optional (default=True)
If True, x error bars will be displayed.
yerr : bool or string, optional (default=True)
If False, no y errors are displayed. If True, an individual y
error will be displayed for each hist in the stack. If 'linear' or
'quadratic', a single error bar will be displayed with either the
linear or quadratic sum of the individual errors.
xpadding : float or 2-tuple of floats, optional (default=0)
Padding to add on the left and right sides of the plot as a fraction of
the axes width after the padding has been added. Specify unique left
and right padding with a 2-tuple.
ypadding : float or 2-tuple of floats, optional (default=.1)
Padding to add on the top and bottom of the plot as a fraction of
the axes height after the padding has been added. Specify unique top
and bottom padding with a 2-tuple.
xerror_in_padding : bool, optional (default=True)
If True then make the padding inclusive of the x errors otherwise
only pad around the x values.
yerror_in_padding : bool, optional (default=True)
If True then make the padding inclusive of the y errors otherwise
only pad around the y values.
emptybins : bool, optional (default=True)
If True (the default) then plot bins with zero content otherwise only
show bins with nonzero content.
snap : bool, optional (default=True)
If True (the default) then the origin is an implicit lower bound of the
histogram unless the histogram has both positive and negative bins.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
kwargs : additional keyword arguments, optional
All additional keyword arguments are passed to matplotlib's errorbar
function.
Returns
-------
The return value from matplotlib's errorbar function, or list of such
return values if a list of histograms and/or graphs was plotted.
"""
if axes is None:
axes = plt.gca()
curr_xlim = axes.get_xlim()
curr_ylim = axes.get_ylim()
was_empty = not axes.has_data()
if isinstance(hists, (_Hist, _Graph1DBase)):
# This is a single plottable object.
returns = _errorbar(
hists, xerr, yerr,
axes=axes, emptybins=emptybins, **kwargs)
_set_bounds(hists, axes=axes,
was_empty=was_empty,
prev_ylim=curr_ylim,
xpadding=xpadding, ypadding=ypadding,
xerror_in_padding=xerror_in_padding,
yerror_in_padding=yerror_in_padding,
snap=snap)
else:
returns = []
for h in hists:
returns.append(errorbar(
h, xerr=xerr, yerr=yerr, axes=axes,
xpadding=xpadding, ypadding=ypadding,
xerror_in_padding=xerror_in_padding,
yerror_in_padding=yerror_in_padding,
snap=snap,
emptybins=emptybins,
**kwargs))
return returns
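# A minimal usage sketch (not part of the original module; `h` is assumed to be
# a filled rootpy Hist or Graph). Any extra keyword arguments are forwarded to
# matplotlib's errorbar function:
#
#     errorbar(h, xerr=False, emptybins=False)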
def _errorbar(h, xerr, yerr, axes=None, emptybins=True, zorder=None, **kwargs):
if axes is None:
axes = plt.gca()
if zorder is None:
zorder = _get_highest_zorder(axes) + 1
_set_defaults(h, kwargs, ['common', 'errors', 'errorbar', 'marker'])
if xerr:
xerr = np.array([list(h.xerrl()), list(h.xerrh())])
if yerr:
yerr = np.array([list(h.yerrl()), list(h.yerrh())])
x = np.array(list(h.x()))
y = np.array(list(h.y()))
if not emptybins:
nonempty = y != 0
x = x[nonempty]
y = y[nonempty]
if xerr is not False and xerr is not None:
xerr = xerr[:, nonempty]
if yerr is not False and yerr is not None:
yerr = yerr[:, nonempty]
return axes.errorbar(x, y, xerr=xerr, yerr=yerr, zorder=zorder, **kwargs)
def step(h, logy=None, axes=None, **kwargs):
"""
Make a matplotlib step plot from a ROOT histogram.
Parameters
----------
h : Hist
A rootpy Hist
logy : bool, optional (default=None)
If True then clip the y range between 1E-300 and 1E300.
If None (the default) then automatically determine if the axes are
log-scale and if this clipping should be performed.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
kwargs : additional keyword arguments, optional
Additional keyword arguments are passed directly to
        matplotlib's step function.
Returns
-------
    Returns the value from matplotlib's step function.
"""
if axes is None:
axes = plt.gca()
if logy is None:
logy = axes.get_yscale() == 'log'
_set_defaults(h, kwargs, ['common', 'line'])
if kwargs.get('color') is None:
kwargs['color'] = h.GetLineColor('mpl')
y = np.array(list(h.y()) + [0.])
if logy:
np.clip(y, 1E-300, 1E300, out=y)
return axes.step(list(h.xedges()), y, where='post', **kwargs)
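# A minimal usage sketch (not part of the original module): draw the outline of
# a filled rootpy Hist `h` on the current axes.
#
#     step(h, color='black', linewidth=2)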
def fill_between(a, b, logy=None, axes=None, **kwargs):
"""
Fill the region between two histograms or graphs.
Parameters
----------
a : Hist
A rootpy Hist
b : Hist
A rootpy Hist
logy : bool, optional (default=None)
If True then clip the region between 1E-300 and 1E300.
If None (the default) then automatically determine if the axes are
log-scale and if this clipping should be performed.
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
kwargs : additional keyword arguments, optional
Additional keyword arguments are passed directly to
matplotlib's fill_between function.
Returns
-------
Returns the value from matplotlib's fill_between function.
"""
if axes is None:
axes = plt.gca()
if logy is None:
logy = axes.get_yscale() == 'log'
if not isinstance(a, _Hist) or not isinstance(b, _Hist):
raise TypeError(
"fill_between only operates on 1D histograms")
a.check_compatibility(b, check_edges=True)
x = []
top = []
bottom = []
for abin, bbin in zip(a.bins(overflow=False), b.bins(overflow=False)):
up = max(abin.value, bbin.value)
dn = min(abin.value, bbin.value)
x.extend([abin.x.low, abin.x.high])
top.extend([up, up])
bottom.extend([dn, dn])
x = np.array(x)
top = np.array(top)
bottom = np.array(bottom)
if logy:
np.clip(top, 1E-300, 1E300, out=top)
np.clip(bottom, 1E-300, 1E300, out=bottom)
return axes.fill_between(x, top, bottom, **kwargs)
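# A minimal usage sketch (not part of the original module; `h_down` and `h_up`
# are assumed to be compatible 1D rootpy Hists, e.g. the -1/+1 sigma variations
# of a systematic band):
#
#     fill_between(h_down, h_up, facecolor='yellow', alpha=0.5)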
def hist2d(h, axes=None, colorbar=False, **kwargs):
"""
Draw a 2D matplotlib histogram plot from a 2D ROOT histogram.
Parameters
----------
h : Hist2D
A rootpy Hist2D
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
colorbar : Boolean, optional (default=False)
If True, include a colorbar in the produced plot
kwargs : additional keyword arguments, optional
Additional keyword arguments are passed directly to
matplotlib's hist2d function.
Returns
-------
Returns the value from matplotlib's hist2d function.
"""
if axes is None:
axes = plt.gca()
X, Y = np.meshgrid(list(h.x()), list(h.y()))
x = X.ravel()
y = Y.ravel()
z = np.array(h.z()).T
# returns of hist2d: (counts, xedges, yedges, Image)
return_values = axes.hist2d(x, y, weights=z.ravel(),
bins=(list(h.xedges()), list(h.yedges())),
**kwargs)
if colorbar:
mappable = return_values[-1]
plt.colorbar(mappable, ax=axes)
return return_values
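# A minimal usage sketch (not part of the original module; `h2` is assumed to
# be a filled rootpy Hist2D):
#
#     hist2d(h2, colorbar=True)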
def imshow(h, axes=None, colorbar=False, **kwargs):
"""
Draw a matplotlib imshow plot from a 2D ROOT histogram.
Parameters
----------
h : Hist2D
A rootpy Hist2D
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
colorbar : Boolean, optional (default=False)
If True, include a colorbar in the produced plot
kwargs : additional keyword arguments, optional
Additional keyword arguments are passed directly to
matplotlib's imshow function.
Returns
-------
Returns the value from matplotlib's imshow function.
"""
kwargs.setdefault('aspect', 'auto')
if axes is None:
axes = plt.gca()
z = np.array(h.z()).T
    axis_image = axes.imshow(
z,
extent=[
h.xedges(1), h.xedges(h.nbins(0) + 1),
h.yedges(1), h.yedges(h.nbins(1) + 1)],
interpolation='nearest',
origin='lower',
**kwargs)
if colorbar:
plt.colorbar(axis_image, ax=axes)
return axis_image
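# A minimal usage sketch (not part of the original module; `h2` is assumed to
# be a filled rootpy Hist2D). Unlike hist2d above, the bin contents are drawn
# directly as an image, with aspect='auto' by default:
#
#     imshow(h2, colorbar=True)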
def contour(h, axes=None, zoom=None, label_contour=False, **kwargs):
"""
Draw a matplotlib contour plot from a 2D ROOT histogram.
Parameters
----------
h : Hist2D
A rootpy Hist2D
axes : matplotlib Axes instance, optional (default=None)
The axes to plot on. If None then use the global current axes.
zoom : float or sequence, optional (default=None)
The zoom factor along the axes. If a float, zoom is the same for each
axis. If a sequence, zoom should contain one value for each axis.
The histogram is zoomed using a cubic spline interpolation to create
smooth contours.
label_contour : Boolean, optional (default=False)
If True, labels are printed on the contour lines.
kwargs : additional keyword arguments, optional
Additional keyword arguments are passed directly to
matplotlib's contour function.
Returns
-------
Returns the value from matplotlib's contour function.
"""
if axes is None:
axes = plt.gca()
x = np.array(list(h.x()))
y = np.array(list(h.y()))
z = np.array(h.z()).T
if zoom is not None:
from scipy import ndimage
if hasattr(zoom, '__iter__'):
zoom = list(zoom)
x = ndimage.zoom(x, zoom[0])
y = ndimage.zoom(y, zoom[1])
else:
x = ndimage.zoom(x, zoom)
y = ndimage.zoom(y, zoom)
z = ndimage.zoom(z, zoom)
return_values = axes.contour(x, y, z, **kwargs)
if label_contour:
plt.clabel(return_values)
return return_values
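# A minimal usage sketch (not part of the original module; `h2` is assumed to
# be a filled rootpy Hist2D, and the `zoom` option requires scipy):
#
#     contour(h2, zoom=2, label_contour=True)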
| gpl-3.0 |
radinformatics/whatisit | whatisit/apps/wordfish/export.py | 1 | 5283 | from django.http import (
HttpResponse,
JsonResponse
)
from django.http.response import (
HttpResponseRedirect,
HttpResponseForbidden,
Http404
)
from django.shortcuts import (
get_object_or_404,
render_to_response,
render,
redirect
)
from django.core.files.storage import FileSystemStorage
from django.core.files.move import file_move_safe
from django.contrib.auth.models import User
from django.contrib import messages
from django.apps import apps
from fnmatch import fnmatch
from whatisit.settings import (
MEDIA_ROOT,
MEDIA_URL
)
from whatisit.apps.wordfish.utils import (
get_collection_annotators,
get_report_collection,
get_report_set,
get_reportset_annotations
)
from whatisit.apps.wordfish.models import ReportSet
import pandas
import errno
import itertools
import os
import tempfile
############################################################################
# Exporting Data
############################################################################
def download_annotation_set_json(request,sid,uid):
    '''a wrapper view for download_annotation_set, setting return_json to True
:param sid: the report set id
:param uid: the user id to download
'''
return download_annotation_set(request,sid,uid,return_json=True)
def download_annotation_set(request,sid,uid,return_json=False):
'''download annotation set will download annotations for a particular user
    and report set. By default a tab-separated text/csv file is returned;
    if return_json is True, a JSON response is returned instead
:param sid: the report set id
:param uid: the user id to download
:param return_json: return a Json response instead (default is False)
'''
report_set = get_report_set(sid)
collection = report_set.collection
# Does the user have permission to edit?
requester = request.user
if requester == collection.owner or requester in collection.contributors.all():
user = User.objects.get(id=uid)
df = get_reportset_annotations(report_set,user)
if not return_json:
response = HttpResponse(df.to_csv(sep="\t"), content_type='text/csv')
export_name = "%s_%s_annotations.tsv" %(report_set.id,user.username)
response['Content-Disposition'] = 'attachment; filename="%s"' %(export_name)
return response
return JsonResponse(df.to_json(orient="records"))
messages.info(request,"You do not have permission to perform this action.")
return redirect('report_collection_details',cid=collection.id)
def download_data(request,cid):
'''download data returns a general view for a collection to download data,
meaning all reports, reports for a set, or annotations for a set
:param cid: the collection id
'''
collection = get_report_collection(cid)
# Does the user have permission to edit?
requester = request.user
if requester == collection.owner or requester in collection.contributors.all():
context = {"collection":collection,
"annotators":get_collection_annotators(collection),
"report_sets":ReportSet.objects.filter(collection=collection)}
return render(request, 'export/download_data.html', context)
messages.info(request,"You do not have permission to perform this action.")
return redirect('report_collection_details',cid=collection.id)
def download_reports_json(request,cid,sid=None):
'''download_reports_json is a wrapper for download_reports, ensuring
that a json response is returned
:param cid: the collection id
    :param sid: the report set id; if provided, use that report set.
'''
return download_reports(request,cid,sid=sid,return_json=True)
def download_reports(request,cid,sid=None,return_json=False):
    '''download reports returns a tsv download for an entire collection of reports (not recommended)
    or for the reports within a single report set (recommended)
:param cid: the collection id
    :param sid: the report set id; if provided, use that report set.
:param return_json: return a Json response instead (default is False)
'''
if sid != None:
report_set = get_report_set(sid)
collection = report_set.collection
reports = report_set.reports.all()
export_name = "%s_reports_set.tsv" %(report_set.id)
else:
collection = get_report_collection(cid)
reports = collection.report_set.all()
export_name = "%s_reports.tsv" %(collection.id)
# Does the user have permission to edit?
requester = request.user
if requester == collection.owner or requester in collection.contributors.all():
df = pandas.DataFrame(columns=["report_id","report_text"])
df["report_id"] = [r.report_id for r in reports]
df["report_text"] = [r.report_text for r in reports]
if not return_json:
response = HttpResponse(df.to_csv(sep="\t"), content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s"' %(export_name)
return response
else:
return JsonResponse(df.to_json(orient="records"))
messages.info(request,"You do not have permission to perform this action.")
return redirect('report_collection_details',cid=collection.id)
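# Hypothetical URL wiring for the views above (illustrative only -- the actual
# route patterns and names live in the app's urls.py and may differ):
#
#     from django.conf.urls import url
#     from whatisit.apps.wordfish import export
#
#     urlpatterns = [
#         url(r'^collections/(?P<cid>\d+)/download/$',
#             export.download_data, name='download_data'),
#         url(r'^collections/(?P<cid>\d+)/download/reports/$',
#             export.download_reports, name='download_reports'),
#         url(r'^collections/sets/(?P<sid>\d+)/annotations/(?P<uid>\d+)/download/$',
#             export.download_annotation_set, name='download_annotation_set'),
#     ]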
| mit |
adamrvfisher/TechnicalAnalysisLibrary | DualSMAstrategy.py | 1 | 3955 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 19:07:37 2017
@author: AmatVictoriaCuramIII
"""
import numpy as np
import random as rand
import pandas as pd
import time as t
from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber
from pandas import read_csv
Empty = []
Dataset = pd.DataFrame()
Portfolio = pd.DataFrame()
Start = t.time()
Counter = 0
#Input
Ticker1 = 'UVXY'
#Ticker2 = '^VIX'
#Remote Signal
#Ticker3 = '^VIX'
#Here we go
#30MinUVXY
#Asset1 = pd.read_csv('UVXYnew.csv')
#Daily UVXY
Asset1 = YahooGrabber(Ticker1)
#For CC futures csv
#Asset2 = read_csv('C:\\Users\\AmatVictoriaCuramIII\\Desktop\\Python\\VX1CC.csv', sep = ',')
#Asset2.Date = pd.to_datetime(Asset2.Date, format = "%m/%d/%Y")
#Asset2 = Asset2.set_index('Date')
#Asset2 = Asset2.reindex(index=Asset2.index[::-1])
#Out of Sample Selection
#Asset1 = Asset1[:-800]
##Log Returns
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
#Brute Force Optimization
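# Each iteration draws a random small window b and big window c (b < c), builds
# the dual-SMA regime, and appends [a, b, c, sharpe, sharpe/MaxDD,
# dailyreturn/MaxDD, MaxDD] to Empty before storing it as a column of Dataset.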
iterations = range(0, 10000)
for i in iterations:
Counter = Counter + 1
a = 1
b = rand.randint(8,50) #small
c = rand.randint(8,500) #big
if b >= c:
continue
window = int(b)
window2 = int(c)
Asset1['MA'] = Asset1['Adj Close'].rolling(window=window, center=False).mean() #small
Asset1['MA2'] = Asset1['Adj Close'].rolling(window=window2, center=False).mean() #big
Asset1['Regime'] = np.where(Asset1['MA'] > Asset1['MA2'], 1 , -1)
Asset1['Strategy'] = (Asset1['LogRet'] * Asset1['Regime'])
#if Asset1['Strategy'].std() == 0:
# continue
Asset1['Multiplier'] = Asset1['Strategy'].cumsum().apply(np.exp)
drawdown = 1 - Asset1['Multiplier'].div(Asset1['Multiplier'].cummax())
MaxDD = max(drawdown)
# if MaxDD > float(.721):
# continue
dailyreturn = Asset1['Strategy'].mean()
# if dailyreturn < .003:
# continue
dailyvol = Asset1['Strategy'].std()
sharpe =(dailyreturn/dailyvol)
#Asset1['Strategy'][:].cumsum().apply(np.exp).plot(grid=True,
# figsize=(8,5))
print(Counter)
Empty.append(a)
Empty.append(b)
Empty.append(c)
Empty.append(sharpe)
Empty.append(sharpe/MaxDD)
Empty.append(dailyreturn/MaxDD)
Empty.append(MaxDD)
Emptyseries = pd.Series(Empty)
Dataset[0] = Emptyseries.values
Dataset[i] = Emptyseries.values
Empty[:] = []
z1 = Dataset.iloc[3]
w1 = np.percentile(z1, 80)
v1 = [] #this variable stores the Nth percentile of top performers
DS1W = pd.DataFrame() #this variable stores your financial advisors for specific dataset
for h in z1:
if h > w1:
v1.append(h)
for j in v1:
r = Dataset.columns[(Dataset == j).iloc[3]]
DS1W = pd.concat([DS1W,Dataset[r]], axis = 1)
y = max(z1)
k = Dataset.columns[(Dataset == y).iloc[3]] #this is the column number
kfloat = float(k[0])
End = t.time()
print(End-Start, 'seconds later')
print(Dataset[k])
window = int((Dataset[kfloat][1]))
window2 = int((Dataset[kfloat][2]))
Asset1['MA'] = Asset1['Adj Close'].rolling(window=window, center=False).mean() #small
Asset1['MA2'] = Asset1['Adj Close'].rolling(window=window2, center=False).mean() #big
Asset1['Regime'] = np.where(Asset1['MA'] > Asset1['MA2'], 1 , -1)
Asset1['Strategy'] = (Asset1['LogRet'] * Asset1['Regime'])
Asset1['Strategy'][:].cumsum().apply(np.exp).plot(grid=True,
figsize=(8,5))
dailyreturn = Asset1['Strategy'].mean()
dailyvol = Asset1['Strategy'].std()
sharpe =(dailyreturn/dailyvol)
Asset1['Multiplier'] = Asset1['Strategy'].cumsum().apply(np.exp)
drawdown = 1 - Asset1['Multiplier'].div(Asset1['Multiplier'].cummax())
print(max(drawdown))
| apache-2.0 |
lordkman/burnman | misc/pyrolite_uncertainty.py | 5 | 38404 | from __future__ import absolute_import
from __future__ import print_function
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
import os.path
import sys
if not os.path.exists('burnman') and os.path.exists('../burnman'):
sys.path.insert(1, os.path.abspath('..'))
import numpy as np
import matplotlib.pyplot as plt
import numpy.ma as ma
import numpy.random
import burnman
import pickle
from burnman import minerals
from misc.helper_solid_solution import HelperSolidSolution
import matplotlib.cm
import matplotlib.colors
from scipy import interpolate
from scipy.stats import norm
import matplotlib.mlab as mlab
import misc.colors as colors
import signal
import sys
def signal_handler(signal, frame):
print('You pressed Ctrl+C!')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def normal(loc=0.0, scale=1.0):
if scale <= 0.0:
return 0.0
else:
return numpy.random.normal(loc, scale)
def realize_mineral(mineral):
# some of the minerals are missing an uncertainty for this. Assign a
# characteristic nominal value for those
if mineral.uncertainties['err_Gprime_0'] == 0.0:
mineral.uncertainties['err_Gprime_0'] = 0.1
# sample the uncertainties for all the relevant parameters. Assume that
# molar mass, V0, and n are well known
mineral.params['K_0'] = mineral.params['K_0'] + \
normal(scale=mineral.uncertainties['err_K_0'])
mineral.params['Kprime_0'] = mineral.params['Kprime_0'] + \
normal(scale=mineral.uncertainties['err_Kprime_0'])
mineral.params['G_0'] = mineral.params['G_0'] + \
normal(scale=mineral.uncertainties['err_G_0'])
mineral.params['Gprime_0'] = mineral.params['Gprime_0'] + \
normal(scale=mineral.uncertainties['err_Gprime_0'])
mineral.params['Debye_0'] = mineral.params['Debye_0'] + \
normal(scale=mineral.uncertainties['err_Debye_0'])
mineral.params['grueneisen_0'] = mineral.params['grueneisen_0'] + \
normal(scale=mineral.uncertainties['err_grueneisen_0'])
mineral.params['q_0'] = mineral.params['q_0'] + \
normal(scale=mineral.uncertainties['err_q_0'])
mineral.params['eta_s_0'] = mineral.params['eta_s_0'] + \
normal(scale=mineral.uncertainties['err_eta_s_0'])
return mineral
def realize_pyrolite():
# approximate four component pyrolite model
x_pv = 0.67
x_fp = 0.33
pv_fe_num = 0.07
fp_fe_num = 0.2
mg_perovskite = minerals.SLB_2011_ZSB_2013.mg_perovskite()
realize_mineral(mg_perovskite)
fe_perovskite = minerals.SLB_2011_ZSB_2013.fe_perovskite()
realize_mineral(fe_perovskite)
wuestite = minerals.SLB_2011_ZSB_2013.wuestite()
realize_mineral(wuestite)
periclase = minerals.SLB_2011_ZSB_2013.periclase()
realize_mineral(periclase)
perovskite = HelperSolidSolution(
[mg_perovskite, fe_perovskite], [1.0 - pv_fe_num, pv_fe_num])
ferropericlase = HelperSolidSolution(
[periclase, wuestite], [1.0 - fp_fe_num, fp_fe_num])
pyrolite = burnman.Composite([perovskite, ferropericlase], [x_pv, x_fp])
pyrolite.set_method('slb3')
anchor_temperature = normal(loc=1935.0, scale=200.0)
return pyrolite, anchor_temperature
def output_rock(rock, file_handle):
for ph in rock.phases:
if(isinstance(ph, HelperSolidSolution)):
for min in ph.endmembers:
file_handle.write('\t' + min.to_string() + '\n')
for key in min.params:
file_handle.write(
'\t\t' + key + ': ' + str(min.params[key]) + '\n')
else:
file_handle.write('\t' + ph.to_string() + '\n')
for key in ph.params:
file_handle.write(
'\t\t' + key + ': ' + str(ph.params[key]) + '\n')
def realization_to_array(rock, anchor_t):
arr = [anchor_t]
names = ['anchor_T']
for ph in rock.phases:
if isinstance(ph, HelperSolidSolution):
for min in ph.endmembers:
for key in min.params:
if key != 'equation_of_state':
arr.append(min.params[key])
names.append(min.to_string() + '.' + key)
else:
for key in ph.params:
if key != 'equation_of_state':
arr.append(ph.mineral.params[key])
names.append(ph.mineral.to_string() + '.' + key)
return arr, names
def array_to_rock(arr, names):
rock, _ = realize_pyrolite()
anchor_t = arr[0]
idx = 1
for phase in rock.phases:
if isinstance(phase, HelperSolidSolution):
for mineral in phase.endmembers:
while mineral.to_string() in names[idx]:
key = names[idx].split('.')[-1]
if key != 'equation_of_state' and key != 'F_0' and key != 'T_0' and key != 'P_0':
assert(mineral.to_string() in names[idx])
mineral.params[key] = arr[idx]
idx += 1
else:
raise Exception("unknown type")
return rock, anchor_t
# set up the seismic model
seismic_model = burnman.seismic.PREM()
npts = 10
depths = np.linspace(850e3, 2700e3, npts)
pressure, seis_rho, seis_vp, seis_vs, seis_vphi = seismic_model.evaluate(
['pressure', 'density', 'v_p', 'v_s', 'v_phi'], depths)
n_realizations = 10000
min_error = np.inf
pressures_sampled = np.linspace(pressure[0], pressure[-1], 20 * len(pressure))
fname = 'output_pyrolite_uncertainty.txt'
whattodo = ""
dbname = ""
goodfits = []
names = []
if len(sys.argv) >= 2:
whattodo = sys.argv[1]
if len(sys.argv) >= 3:
dbname = sys.argv[2]
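# Example invocations (the available sub-commands are documented in the final
# "Options" branch at the bottom of this script; database file names are
# illustrative):
#     python pyrolite_uncertainty.py run samples.db
#     python pyrolite_uncertainty.py plot samples.db
#     python pyrolite_uncertainty.py plotgood samples1.db samples2.db
#     python pyrolite_uncertainty.py plotone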
if whattodo == "plotgood" and len(sys.argv) > 2:
files = sys.argv[2:]
print("files:", files)
names = pickle.load(open(files[0] + ".names", "rb"))
erridx = names.index("err")
print(erridx)
allfits = []
for f in files:
a = pickle.load(open(f, "rb"))
allfits.extend(a)
b = a
# b = [i for i in a if i[erridx]<3e-5] -- filter, need to adjust error
# value
print("adding %d out of %d" % (len(b), len(a)))
goodfits.extend(b)
minerr = min([f[erridx] for f in allfits])
print("min error is %f" % minerr)
num = len(goodfits)
print("we have %d good entries" % num)
i = 0
idx = 0
figsize = (20, 15)
font = {'family': 'normal',
'weight': 'normal',
'size': 8}
matplotlib.rc('font', **font)
prop = {'size': 12}
# plt.rc('text', usetex=True)
plt.rcParams['text.latex.preamble'] = r'\usepackage{relsize}'
plt.rc('font', family='sans-serif')
figure = plt.figure(dpi=150, figsize=figsize)
plt.subplots_adjust(hspace=0.3)
for name in names:
if name.endswith(".n") or name.endswith(".V_0") or name.endswith(".molar_mass") or name.endswith(".F_0") or name.endswith(".P_0"):
i += 1
continue
plt.subplot(5, 8, idx)
idx += 1
shortname = name.replace("'burnman.minerals.SLB_2011_ZSB_2013", "").replace(
"'", "").replace("perovskite", "p")
trace = []
for entry in allfits:
trace.append(entry[i])
# n, bins, patches = plt.hist(np.array(trace), 20, normed=1,
# facecolor='blue', alpha=0.75)
hist, bins = np.histogram(np.array(trace), bins=50, density=True)
(mu, sigma) = norm.fit(np.array(trace))
y = mlab.normpdf(bins, mu, sigma)
if sigma > 1e-10 and not shortname.startswith("err"):
l = plt.plot(bins, y, 'b--', linewidth=1)
trace = []
if shortname.startswith("err"):
shortname += "(log)"
for entry in goodfits:
trace.append(np.log(entry[i]) / np.log(10))
else:
for entry in goodfits:
trace.append(entry[i])
hist, bins = np.histogram(trace)
n, bins, patches = plt.hist(
np.array(trace), 20, facecolor='green', alpha=0.75, normed=True)
(mu, sigma) = norm.fit(np.array(trace))
y = mlab.normpdf(bins, mu, sigma)
if sigma > 1e-10 and not shortname.startswith("err"):
l = plt.plot(bins, y, 'r--', linewidth=1)
plt.title("%s\nmean %.3e sd: %.3e" %
(shortname, mu, sigma), fontsize=8)
i += 1
plt.savefig('good.png')
print("Writing good.png")
# plt.show()
figsize = (8, 6)
figure = plt.figure(dpi=150, figsize=figsize)
for fit in goodfits:
# print(fit)
# print(names)
rock, anchor_t = array_to_rock(fit, names)
temperature = burnman.geotherm.adiabatic(pressure, anchor_t, rock)
rock.set_averaging_scheme(
burnman.averaging_schemes.HashinShtrikmanAverage())
rho, vs, vphi = rock.evaluate(
['rho', 'v_s', 'v_phi'], pressure, temperature)
print(".")
plt.plot(pressure / 1.e9, vs / 1.e3,
linestyle="-", color='r', linewidth=1.0)
plt.plot(pressure / 1.e9, vphi / 1.e3,
linestyle="-", color='b', linewidth=1.0)
plt.plot(pressure / 1.e9, rho / 1.e3,
linestyle="-", color='g', linewidth=1.0)
print("done!")
# plot v_s
plt.plot(pressure / 1.e9, seis_vs / 1.e3, linestyle="--",
color='k', linewidth=2.0, label='PREM')
# plot v_phi
plt.plot(pressure / 1.e9, seis_vphi / 1.e3,
linestyle="--", color='k', linewidth=2.0, label='PREM')
# plot density
plt.plot(pressure / 1.e9, seis_rho / 1.e3, linestyle="--",
color='k', linewidth=2.0, label='PREM')
plt.savefig('goodones.png')
print("Writing goodones.png")
plt.show()
elif whattodo == "plotone":
figsize = (6, 5)
figure = plt.figure(dpi=100, figsize=figsize)
names = [
'anchor_T', "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Gprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.K_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.G_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.q_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Kprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.grueneisen_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.V_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Debye_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.molar_mass", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.n", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.eta_s_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Gprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.K_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.G_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.q_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Kprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.grueneisen_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.V_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Debye_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.molar_mass", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.n",
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.eta_s_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Gprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.K_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.G_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.q_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Kprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.grueneisen_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.V_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Debye_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.molar_mass", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.n", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.eta_s_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Gprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.K_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.G_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.q_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Kprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.grueneisen_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.V_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Debye_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.molar_mass", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.n", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.eta_s_0", 'err', 'err_rho', 'err_vphi', 'err_vs']
mymapbestfitnotused = {'anchor_T': 2000,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Gprime_0": 1.779,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.K_0": 2.500e11,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.G_0": 1.728e11,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.q_0": 1.098,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Kprime_0": 3.917,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.grueneisen_0": 1.442,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.V_0": 2.445e-05,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Debye_0": 9.057e2,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.molar_mass": 0.1,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.n": 5,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.eta_s_0": 2.104,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Gprime_0": 1.401,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.K_0": 2.637e11,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.G_0": 1.329e11,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.q_0": 1.084,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Kprime_0": 3.428,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.grueneisen_0": 1.568,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.V_0": 2.549e-05,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Debye_0": 8.707e2,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.molar_mass": 0.1319,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.n": 5,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.eta_s_0": 2.335,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Gprime_0": 2.108,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.K_0": 1.610e11,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.G_0": 1.310e11,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.q_0": 1.700,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Kprime_0": 3.718,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.grueneisen_0": 1.359,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.V_0": 1.124e-05,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Debye_0": 7.672,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.molar_mass": 0.0403,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.n": 2,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.eta_s_0": 2.804,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Gprime_0": 1.405,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.K_0": 1.790e11,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.G_0": 5.905e10,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.q_0": 1.681,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Kprime_0": 4.884,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.grueneisen_0": 1.532,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.V_0": 1.226e-05,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Debye_0": 4.543e2,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.molar_mass": 0.0718,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.n": 2,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.eta_s_0": -7.048e-2,
'err': 0,
'err_rho': 0,
'err_vphi': 0,
'err_vs': 0}
print(
"goal: 5.35427067017e-06 2.72810809096e-07 3.67937164518e-06 1.4020882159e-06")
mymaplit = {'anchor_T': 2000,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Gprime_0": 1.74, # r:1.74
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.K_0": 250.5e9,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.G_0": 172.9e9,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.q_0": 1.09,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Kprime_0": 4.01, # r:4.01
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.grueneisen_0": 1.44,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.V_0": 24.45e-6,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Debye_0": 9.059e2,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.molar_mass": 0.1,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.n": 5,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.eta_s_0": 2.13,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Gprime_0": 1.4,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.K_0": 2.72e11, # b: 2.637e11, r:2.72e11
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.G_0": 1.33e11,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.q_0": 1.1,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Kprime_0": 4.1, # r: 4.1
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.grueneisen_0": 1.57,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.V_0": 2.549e-05,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Debye_0": 8.71e2,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.molar_mass": 0.1319,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.n": 5,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.eta_s_0": 2.3,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Gprime_0": 2.1,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.K_0": 161e9,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.G_0": 1.310e11,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.q_0": 1.700,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Kprime_0": 3.8, # b: 3.718 r:3.8
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.grueneisen_0": 1.36,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.V_0": 1.124e-05,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Debye_0": 767,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.molar_mass": 0.0403,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.n": 2,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.eta_s_0": 2.8,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Gprime_0": 1.4, # r: 1.4
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.K_0": 1.790e11,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.G_0": 59.0e9,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.q_0": 1.7,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Kprime_0": 4.9,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.grueneisen_0": 1.53,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.V_0": 1.226e-05,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Debye_0": 4.54e2,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.molar_mass": 0.0718,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.n": 2,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.eta_s_0": -0.1,
'err': 0,
'err_rho': 0,
'err_vphi': 0,
'err_vs': 0}
mymap = {'anchor_T': 2000,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Gprime_0": 1.779, # r:1.74
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.K_0": 250.5e9,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.G_0": 172.9e9,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.q_0": 1.09,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Kprime_0": 3.917, # r:4.01
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.grueneisen_0": 1.44,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.V_0": 24.45e-6,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Debye_0": 9.059e2,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.molar_mass": 0.1,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.n": 5,
"'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.eta_s_0": 2.13,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Gprime_0": 1.4,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.K_0": 2.637e11, # b: 2.637e11, r:2.72e11
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.G_0": 1.33e11,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.q_0": 1.1,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Kprime_0": 3.428, # r: 4.1
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.grueneisen_0": 1.57,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.V_0": 2.549e-05,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Debye_0": 8.71e2,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.molar_mass": 0.1319,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.n": 5,
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.eta_s_0": 2.3,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Gprime_0": 2.1,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.K_0": 161e9,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.G_0": 1.310e11,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.q_0": 1.700,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Kprime_0": 3.718, # b: 3.718 r:3.8
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.grueneisen_0": 1.36,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.V_0": 1.124e-05,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Debye_0": 767,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.molar_mass": 0.0403,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.n": 2,
"'burnman.minerals.SLB_2011_ZSB_2013.periclase'.eta_s_0": 2.8,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Gprime_0": 1.4, # r: 1.4
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.K_0": 1.790e11,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.G_0": 59.0e9,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.q_0": 1.7,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Kprime_0": 4.9,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.grueneisen_0": 1.53,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.V_0": 1.226e-05,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Debye_0": 4.54e2,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.molar_mass": 0.0718,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.n": 2,
"'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.eta_s_0": -0.1,
'err': 0,
'err_rho': 0,
'err_vphi': 0,
'err_vs': 0}
# make table:
rows = ["V_0", "K_0", "Kprime_0", "G_0", "Gprime_0",
"molar_mass", "n", "Debye_0", "grueneisen_0", "q_0", "eta_s_0"]
for row in rows:
val = []
for n in names:
if "." + row in n:
val.append(mymap[n])
print(row, "& %g && %g && %g && %g & \\" %
(val[0], val[1], val[2], val[3]))
dashstyle2 = (7, 3)
dashstyle3 = (3, 2)
fit = []
lit = []
for n in names:
fit.append(mymap[n])
lit.append(mymaplit[n])
rock, anchor_t = array_to_rock(fit, names)
temperature = burnman.geotherm.adiabatic(pressure, anchor_t, rock)
rock.set_averaging_scheme(
burnman.averaging_schemes.HashinShtrikmanAverage())
rho, vs, vphi = rock.evaluate(
['rho', 'v_s', 'v_phi'], pressure, temperature)
err_vs, err_vphi, err_rho = burnman.compare_l2(
depths, [vs, vphi, rho], [seis_vs, seis_vphi, seis_rho])
error = np.sum(
[err_rho / np.mean(seis_rho), err_vphi / np.mean(seis_vphi), err_vs / np.mean(seis_vs)])
figsize = (6, 5)
prop = {'size': 12}
plt.rc('text', usetex=True)
plt.rcParams['text.latex.preamble'] = r'\usepackage{relsize}'
plt.rc('font', family='sans-serif')
figure = plt.figure(dpi=100, figsize=figsize)
# plot v_s
plt.plot(pressure / 1.e9, seis_vs / 1.e3, linestyle="-",
color='k', linewidth=2.0, label='PREM')
# plot v_phi
plt.plot(pressure / 1.e9, seis_vphi / 1.e3,
linestyle="-", color='k', linewidth=2.0)
# plot density
plt.plot(pressure / 1.e9, seis_rho / 1.e3,
linestyle="-", color='k', linewidth=2.0)
plt.plot(
pressure / 1.e9, vphi / 1.e3, linestyle="-", color=colors.color(3),
linewidth=1.0, marker='s', markerfacecolor=colors.color(3), label="vphi")
plt.plot(pressure / 1.e9, vs / 1.e3, linestyle="-", color=colors.color(4),
linewidth=1.0, marker='v', markerfacecolor=colors.color(4), label="vs")
plt.plot(pressure / 1.e9, rho / 1.e3, linestyle="-", color=colors.color(2),
linewidth=1.0, marker='o', markerfacecolor=colors.color(2), label="rho")
rock, anchor_t = array_to_rock(lit, names)
temperature = burnman.geotherm.adiabatic(pressure, anchor_t, rock)
rock.set_averaging_scheme(
burnman.averaging_schemes.HashinShtrikmanAverage())
rho, vs, vphi = rock.evaluate(
['rho', 'v_s', 'v_phi'], pressure, temperature)
plt.plot(pressure / 1.e9, vs / 1.e3, dashes=dashstyle2,
color=colors.color(4), linewidth=1.0)
plt.plot(pressure / 1.e9, vphi / 1.e3, dashes=dashstyle2,
color=colors.color(3), linewidth=1.0, label="literature")
plt.plot(pressure / 1.e9, rho / 1.e3, dashes=dashstyle2,
color=colors.color(2), linewidth=1.0)
plt.xlabel("Pressure (GPa)")
plt.ylabel("Velocities (km/s) and Density ($\cdot 10^3$ kg/m$^3$)")
plt.legend(bbox_to_anchor=(1.0, 0.9), prop={'size': 12})
plt.xlim(25, 135)
# plt.ylim(6,11)
plt.savefig("onefit.pdf", bbox_inches='tight')
print("wrote onefit.pdf")
# plt.show()
elif whattodo == "run" and len(sys.argv) > 2:
outfile = open(fname, 'w')
outfile.write("#pressure\t Vs \t Vp \t rho \n")
best_fit_file = open('output_pyrolite_closest_fit.txt', 'w')
for i in range(n_realizations):
if (i > 0 and i % 25 == 0):
# save good fits
print("saving %d fits to %s" % (len(goodfits), dbname))
pickle.dump(goodfits, open(dbname + ".tmp", "wb"))
os.rename(dbname + ".tmp", dbname)
pickle.dump(names, open(dbname + ".names", "wb"))
print("realization", i + 1)
try:
# create the ith model
pyrolite, anchor_temperature = realize_pyrolite()
temperature = burnman.geotherm.adiabatic(
pressure, anchor_temperature, pyrolite)
# calculate the seismic observables
pyrolite.set_averaging_scheme(
burnman.averaging_schemes.HashinShtrikmanAverage())
rho, vs, vphi = pyrolite.evaluate(
['rho', 'v_s', 'v_phi'], pressure, temperature)
# estimate the misfit with the seismic model
err_vs, err_vphi, err_rho = burnman.compare_l2(
depths, [vs, vphi, rho], [seis_vs, seis_vphi, seis_rho])
error = np.sum(
[err_rho / np.mean(seis_rho), err_vphi / np.mean(seis_vphi), err_vs / np.mean(seis_vs)])
if error < min_error:
min_error = error
print("new best realization with error", error)
best_fit_file.write('Current best fit : ' + str(error) + '\n')
output_rock(pyrolite, best_fit_file)
a, names = realization_to_array(pyrolite, anchor_temperature)
a.extend([error, err_rho, err_vphi, err_vs])
names.extend(["err", "err_rho", "err_vphi", "err_vs"])
goodfits.append(a)
# interpolate to a higher resolution line
frho = interpolate.interp1d(pressure, rho)
fs = interpolate.interp1d(pressure, vs)
fphi = interpolate.interp1d(pressure, vphi)
pressure_list = pressures_sampled
density_list = frho(pressures_sampled)
vs_list = fs(pressures_sampled)
vphi_list = fphi(pressures_sampled)
data = list(zip(pressure_list, vs_list, vphi_list, density_list))
np.savetxt(outfile, data, fmt='%.10e', delimiter='\t')
except ValueError:
print("failed, skipping")
outfile.close()
best_fit_file.close()
elif whattodo == "error":
values = [
1957.020221991886, 1.6590112209181886, 249335164670.39246, 170883524675.03842, 0.8922515920546608, 4.083536182853109, 1.4680357687136616, 2.445e-05, 907.6618871363347, 0.1, 5, 1.4575168081960164, 1.3379195339709193, 260344929478.3809, 138077598973.27307, 0.17942226498091196, 1.3948903373340595, 1.436924855529012, 2.549e-05, 881.2532665499875, 0.1319, 5, 3.1204661890247394, 2.1411938868468483, 164407523972.7836,
131594720803.07439, 1.855224221011796, 3.867545309505681, 1.2953203656315155, 1.124e-05, 769.8199298156555, 0.0403, 2, 2.8860489779521985, 1.4263617489128713, 177341125271.45096, 59131041052.46985, 2.352310980469468, 5.1279202520952545, 1.6021924873676925, 1.226e-05, 440.13042122457716, 0.0718, 2, -1.6065263588976038, 7.5954915681374134e-05, 9.6441602176002807e-07, 4.4326026287552629e-05, 3.0664473372061482e-05]
names = [
'anchor_T', "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Gprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.K_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.G_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.q_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Kprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.grueneisen_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.V_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.Debye_0", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.molar_mass", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.n", "'burnman.minerals.SLB_2011_ZSB_2013.mg_perovskite'.eta_s_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Gprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.K_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.G_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.q_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Kprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.grueneisen_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.V_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.Debye_0", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.molar_mass", "'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.n",
"'burnman.minerals.SLB_2011_ZSB_2013.fe_perovskite'.eta_s_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Gprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.K_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.G_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.q_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Kprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.grueneisen_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.V_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.Debye_0", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.molar_mass", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.n", "'burnman.minerals.SLB_2011_ZSB_2013.periclase'.eta_s_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Gprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.K_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.G_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.q_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Kprime_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.grueneisen_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.V_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.Debye_0", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.molar_mass", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.n", "'burnman.minerals.SLB_2011_ZSB_2013.wuestite'.eta_s_0", 'err', 'err_rho', 'err_vphi', 'err_vs']
rock, anchor_t = array_to_rock(values, names)
temperature = burnman.geotherm.adiabatic(pressure, anchor_t, rock)
rock.set_averaging_scheme(
burnman.averaging_schemes.HashinShtrikmanAverage())
rho, vs, vphi = rock.evaluate(
['rho', 'v_s', 'v_phi'], pressure, temperature)
err_vs, err_vphi, err_rho = burnman.compare_l2(
depths, [vs, vphi, rho], [seis_vs, seis_vphi, seis_rho])
error = np.sum([err_rho, err_vphi, err_vs])
print(error, err_rho, err_vphi, err_vs)
elif whattodo == "plot":
infile = open(fname, 'r')
data = np.loadtxt(fname, skiprows=1)
pressure_list = data[:, 0]
density_list = data[:, 3]
vs_list = data[:, 1]
vphi_list = data[:, 2]
infile.close()
density_hist, rho_xedge, rho_yedge = np.histogram2d(
pressure_list, density_list, bins=len(pressures_sampled), normed=True)
vs_hist, vs_xedge, vs_yedge = np.histogram2d(
pressure_list, vs_list, bins=len(pressures_sampled), normed=True)
vphi_hist, vphi_xedge, vphi_yedge = np.histogram2d(
pressure_list, vphi_list, bins=len(pressures_sampled), normed=True)
vs_xedge /= 1.e9
vphi_xedge /= 1.e9
rho_xedge /= 1.e9
vs_yedge /= 1.e3
vphi_yedge /= 1.e3
rho_yedge /= 1.e3
left_edge = min(vs_xedge[0], vphi_xedge[0], rho_xedge[0])
right_edge = max(vs_xedge[-1], vphi_xedge[-1], rho_xedge[-1])
bottom_edge = 4.3
top_edge = 11.3
aspect_ratio = (right_edge - left_edge) / (top_edge - bottom_edge)
gamma = 0.8 # Mess with this to change intensity of colormaps near the edges
# do some setup for the figure
plt.rc('text', usetex=True)
plt.rcParams['text.latex.preamble'] = r'\usepackage{relsize}'
plt.rc('font', family='sans-serif')
plt.subplots_adjust(wspace=0.3)
plt.subplot(111, aspect='equal')
plt.xlim(left_edge, right_edge)
plt.ylim(bottom_edge, top_edge)
plt.xlabel('Pressure (GPa)')
plt.ylabel('Wave Speed (km/s)')
# plot v_s
vs_hist = ma.masked_where(vs_hist <= 0.0, vs_hist)
c = matplotlib.colors.LinearSegmentedColormap.from_list(
'vphi', [(0, '#ffffff'), (0.2, '#eff3ff'), (0.4, '#bdd7e7'), (0.6, '#6baed6'), (0.8, '#3182bd'), (1.0, '#08519c')], gamma=gamma)
c.set_bad('w', alpha=1.0)
plt.imshow(
vs_hist.transpose(), origin='low', cmap=c, interpolation='gaussian', alpha=.7,
aspect=aspect_ratio, extent=[vs_xedge[0], vs_xedge[-1], vs_yedge[0], vs_yedge[-1]])
plt.plot(pressure / 1.e9, seis_vs / 1.e3, linestyle="--",
color='k', linewidth=2.0, label='PREM')
# plot v_phi
vphi_hist = ma.masked_where(vphi_hist <= 0.0, vphi_hist)
c = matplotlib.colors.LinearSegmentedColormap.from_list(
'vphi', [(0, '#ffffff'), (0.2, '#fee5d9'), (0.4, '#fcae91'), (0.6, '#fb6a4a'), (0.8, '#de2d26'), (1.0, '#a50f15')], gamma=gamma)
c.set_bad('w', alpha=1.0)
plt.imshow(
vphi_hist.transpose(), origin='low', cmap=c, interpolation='gaussian', alpha=.7,
aspect=aspect_ratio, extent=[vphi_xedge[0], vphi_xedge[-1], vphi_yedge[0], vphi_yedge[-1]])
plt.plot(pressure / 1.e9, seis_vphi / 1.e3,
linestyle="--", color='k', linewidth=2.0, label='PREM')
# plot density
density_hist = ma.masked_where(density_hist <= 0.0, density_hist)
c = matplotlib.colors.LinearSegmentedColormap.from_list(
'vphi', [(0, '#ffffff'), (0.2, '#edf8e9'), (0.4, '#bae4b3'), (0.6, '#74c476'), (0.8, '#31a354'), (1.0, '#006d2c')], gamma=gamma)
c.set_bad('w', alpha=1.0)
plt.imshow(
density_hist.transpose(), origin='low', cmap=c, interpolation='gaussian', alpha=.7,
aspect=aspect_ratio, extent=[rho_xedge[0], rho_xedge[-1], rho_yedge[0], rho_yedge[-1]])
plt.plot(pressure / 1.e9, seis_rho / 1.e3, linestyle="--",
color='k', linewidth=2.0, label='PREM')
# save and show the image
fig = plt.gcf()
fig.set_size_inches(6.0, 6.0)
if "RUNNING_TESTS" not in globals():
fig.savefig("pyrolite_uncertainty.pdf", bbox_inches='tight', dpi=100)
print("Writing pyrolite_uncertainty.pdf")
plt.show()
else:
print("Options:")
print(
" run <dbname> -- run realizations and write into given database name")
print(" plot <dbname> -- plot given database")
print(
" plotgood <dbname1> <dbname2> ... -- aggregate databases and plot")
print(" plotone -- plot a single hardcoded nice result")
print(
" error -- testing, compute errors of a single hardcoded realization")
| gpl-2.0 |
jpautom/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
SGMAP-AGD/Nomenclature_Update | comparaison/test_two_outputs.py | 2 | 2207 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 22 16:30:50 2016
@author: Alexis Eidelman
"""
import pandas as pd
name1 = 'output.csv'
name2 = 'output_dev.csv'
tab1 = pd.read_csv('data\\' + name1, sep=';')
tab2 = pd.read_csv('data\\' + name2, sep=',') #why is it a ','
## examine the column names
def _compare(list1, list2):
set1 = set(list1)
set2 = set(list2)
removed = set1 - set2
added = set2 - set1
return added, removed
in_2_not_in_1, in_1_not_in_2 = _compare(tab1.columns, tab2.columns)
print('in 1 but not in 2', in_1_not_in_2)
print('in 2 but not in 1', in_2_not_in_1)
tab1.drop(in_1_not_in_2, axis=1, inplace=True)
tab2.drop(in_2_not_in_1, axis=1, inplace=True)
# order tab2 as tab1 (may be useless)
tab2 = tab2[tab1.columns]
# differents types
string_columns = tab1.dtypes[tab1.dtypes == 'object'].index.tolist()
float_columns = [x for x in tab1.columns if x not in string_columns]
# compare the dtypes
pb_dtypes = tab1.dtypes != tab2.dtypes
## examine the indexes
_compare(tab1.index, tab2.index)
len(tab2) - len(tab1)  # 1534 differences
tab1.CODGEO = tab1.CODGEO.astype(str)
tab2.CODGEO = tab2.CODGEO.astype(str)
tab1.loc[tab1.CODGEO.str.len() == 8, 'CODGEO'] = '0' + tab1.loc[tab1.CODGEO.str.len() == 8, 'CODGEO']
tab2.loc[tab2.CODGEO.str.len() == 8, 'CODGEO'] = '0' + tab2.loc[tab2.CODGEO.str.len() == 8, 'CODGEO']
in_2_not_in_1, in_1_not_in_2 = _compare(tab1.CODGEO, tab2.CODGEO)
# CODGEO => OK
tab1.sort_values('CODGEO', inplace=True)
tab2.sort_values('CODGEO', inplace=True)
## as an aside, this probably belongs in a separate script, but we need to check why
# there are duplicate CODGEO rows, i.e. why the characteristics are not always the same
doublons = tab1['CODGEO'].value_counts() > 1
doublons = doublons[doublons].index.tolist()
# example
tab1.loc[tab1['CODGEO'] == doublons[2], string_columns]  # each time, one of the rows has a null value
tab1.loc[tab1['CODGEO'] == doublons[2], :].iloc[0,:]
test = tab1.loc[tab1['CODGEO'] == doublons[2], :].iloc[1,:].notnull()
# check the NaNs
diff = tab2[float_columns].isnull() == tab1[float_columns].isnull()
# check the values
diff = tab2[float_columns] - tab1[float_columns]
| lgpl-3.0 |
ofrei/ldsc | ldsc.py | 1 | 33879 | #!/usr/bin/env python
'''
(c) 2014 Brendan Bulik-Sullivan and Hilary Finucane
LDSC is a command line tool for estimating
1. LD Score
2. heritability / partitioned heritability
3. genetic covariance / correlation
'''
from __future__ import division
import ldscore.ldscore as ld
import ldscore.parse as ps
import ldscore.sumstats as sumstats
import ldscore.regressions as reg
import numpy as np
import pandas as pd
from subprocess import call
from itertools import product
import time, sys, traceback, argparse
try:
x = pd.DataFrame({'A': [1, 2, 3]})
x.sort_values(by='A')
except AttributeError:
raise ImportError('LDSC requires pandas version >= 0.17.0')
__version__ = '1.0.0'
MASTHEAD = "*********************************************************************\n"
MASTHEAD += "* LD Score Regression (LDSC)\n"
MASTHEAD += "* Version {V}\n".format(V=__version__)
MASTHEAD += "* (C) 2014-2015 Brendan Bulik-Sullivan and Hilary Finucane\n"
MASTHEAD += "* Broad Institute of MIT and Harvard / MIT Department of Mathematics\n"
MASTHEAD += "* GNU General Public License v3\n"
MASTHEAD += "*********************************************************************\n"
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('precision', 4)
pd.set_option('max_colwidth',1000)
np.set_printoptions(linewidth=1000)
np.set_printoptions(precision=4)
def sec_to_str(t):
'''Convert seconds to days:hours:minutes:seconds'''
[d, h, m, s, n] = reduce(lambda ll, b : divmod(ll[0], b) + ll[1:], [(t, 1), 60, 60, 24])
f = ''
if d > 0:
f += '{D}d:'.format(D=d)
if h > 0:
f += '{H}h:'.format(H=h)
if m > 0:
f += '{M}m:'.format(M=m)
f += '{S}s'.format(S=s)
return f
def _remove_dtype(x):
'''Removes dtype: float64 and dtype: int64 from pandas printouts'''
x = str(x)
x = x.replace('\ndtype: int64', '')
x = x.replace('\ndtype: float64', '')
return x
class Logger(object):
'''
Lightweight logging.
TODO: replace with logging module
'''
def __init__(self, fh):
self.log_fh = open(fh, 'wb')
def log(self, msg):
'''
Print to log file and stdout with a single command.
'''
print >>self.log_fh, msg
print msg
def __filter__(fname, noun, verb, merge_obj):
merged_list = None
if fname:
f = lambda x,n: x.format(noun=noun, verb=verb, fname=fname, num=n)
x = ps.FilterFile(fname)
c = 'Read list of {num} {noun} to {verb} from {fname}'
print f(c, len(x.IDList))
merged_list = merge_obj.loj(x.IDList)
len_merged_list = len(merged_list)
if len_merged_list > 0:
c = 'After merging, {num} {noun} remain'
print f(c, len_merged_list)
else:
error_msg = 'No {noun} retained for analysis'
raise ValueError(f(error_msg, 0))
return merged_list
def __isclose__(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def annot_sort_key(s):
'''For use with --cts-bin. Fixes weird pandas crosstab column order.'''
if type(s) == tuple:
s = [x.split('_')[0] for x in s]
s = map(lambda x: float(x) if x != 'min' else -float('inf'), s)
else: # type(s) = str:
s = s.split('_')[0]
if s == 'min':
s = float('-inf')
else:
s = float(s)
return s
def ldscore(args, log):
'''
Wrapper function for estimating l1, l1^2, l2 and l4 (+ optionally standard errors) from
reference panel genotypes.
Annot format is
chr snp bp cm <annotations>
'''
if args.bfile:
snp_file, snp_obj = args.bfile+'.bim', ps.PlinkBIMFile
ind_file, ind_obj = args.bfile+'.fam', ps.PlinkFAMFile
array_file, array_obj = args.bfile+'.bed', ld.PlinkBEDFile
# read bim/snp
array_snps = snp_obj(snp_file)
m = len(array_snps.IDList)
log.log('Read list of {m} SNPs from {f}'.format(m=m, f=snp_file))
if args.annot is not None: # read --annot
try:
if args.thin_annot: # annot file has only annotations
annot = ps.ThinAnnotFile(args.annot)
n_annot, ma = len(annot.df.columns), len(annot.df)
log.log("Read {A} annotations for {M} SNPs from {f}".format(f=args.annot,
A=n_annot, M=ma))
annot_matrix = annot.df.values
annot_colnames = annot.df.columns
keep_snps = None
else:
annot = ps.AnnotFile(args.annot)
n_annot, ma = len(annot.df.columns) - 4, len(annot.df)
log.log("Read {A} annotations for {M} SNPs from {f}".format(f=args.annot,
A=n_annot, M=ma))
annot_matrix = np.array(annot.df.iloc[:,4:])
annot_colnames = annot.df.columns[4:]
keep_snps = None
if np.any(annot.df.SNP.values != array_snps.df.SNP.values):
raise ValueError('The .annot file must contain the same SNPs in the same'+\
' order as the .bim file.')
except Exception:
log.log('Error parsing .annot file')
raise
elif args.extract is not None: # --extract
keep_snps = __filter__(args.extract, 'SNPs', 'include', array_snps)
annot_matrix, annot_colnames, n_annot = None, None, 1
elif args.cts_bin is not None and args.cts_breaks is not None: # --cts-bin
cts_fnames = sumstats._splitp(args.cts_bin) # read filenames
args.cts_breaks = args.cts_breaks.replace('N','-') # replace N with negative sign
try: # split on x
breaks = [[float(x) for x in y.split(',')] for y in args.cts_breaks.split('x')]
except ValueError as e:
raise ValueError('--cts-breaks must be a comma-separated list of numbers: '
+str(e.args))
if len(breaks) != len(cts_fnames):
raise ValueError('Need to specify one set of breaks for each file in --cts-bin.')
if args.cts_names:
cts_colnames = [str(x) for x in args.cts_names.split(',')]
if len(cts_colnames) != len(cts_fnames):
msg = 'Must specify either no --cts-names or one value for each file in --cts-bin.'
raise ValueError(msg)
else:
cts_colnames = ['ANNOT'+str(i) for i in xrange(len(cts_fnames))]
log.log('Reading numbers with which to bin SNPs from {F}'.format(F=args.cts_bin))
cts_levs = []
full_labs = []
for i,fh in enumerate(cts_fnames):
vec = ps.read_cts(cts_fnames[i], array_snps.df.SNP.values)
max_cts = np.max(vec)
min_cts = np.min(vec)
cut_breaks = list(breaks[i])
name_breaks = list(cut_breaks)
if np.all(cut_breaks >= max_cts) or np.all(cut_breaks <= min_cts):
raise ValueError('All breaks lie outside the range of the cts variable.')
if np.all(cut_breaks <= max_cts):
name_breaks.append(max_cts)
cut_breaks.append(max_cts+1)
if np.all(cut_breaks >= min_cts):
name_breaks.append(min_cts)
cut_breaks.append(min_cts-1)
name_breaks.sort()
cut_breaks.sort()
n_breaks = len(cut_breaks)
# so that col names are consistent across chromosomes with different max vals
name_breaks[0] = 'min'
name_breaks[-1] = 'max'
name_breaks = [str(x) for x in name_breaks]
labs = [name_breaks[i]+'_'+name_breaks[i+1] for i in xrange(n_breaks-1)]
cut_vec = pd.Series(pd.cut(vec, bins=cut_breaks, labels=labs))
cts_levs.append(cut_vec)
full_labs.append(labs)
annot_matrix = pd.concat(cts_levs, axis=1)
annot_matrix.columns = cts_colnames
# crosstab -- for now we keep empty columns
annot_matrix = pd.crosstab(annot_matrix.index,
[annot_matrix[i] for i in annot_matrix.columns], dropna=False,
colnames=annot_matrix.columns)
# add missing columns
if len(cts_colnames) > 1:
for x in product(*full_labs):
if x not in annot_matrix.columns:
annot_matrix[x] = 0
else:
for x in full_labs[0]:
if x not in annot_matrix.columns:
annot_matrix[x] = 0
annot_matrix = annot_matrix[sorted(annot_matrix.columns, key=annot_sort_key)]
if len(cts_colnames) > 1:
# flatten multi-index
annot_colnames = ['_'.join([cts_colnames[i]+'_'+b for i,b in enumerate(c)])
for c in annot_matrix.columns]
else:
annot_colnames = [cts_colnames[0]+'_'+b for b in annot_matrix.columns]
annot_matrix = np.matrix(annot_matrix)
keep_snps = None
n_annot = len(annot_colnames)
if np.any(np.sum(annot_matrix, axis=1) == 0):
# This exception should never be raised. For debugging only.
raise ValueError('Some SNPs have no annotation in --cts-bin. This is a bug!')
else:
annot_matrix, annot_colnames, keep_snps = None, None, None,
n_annot = 1
# read fam
array_indivs = ind_obj(ind_file)
n = len(array_indivs.IDList)
log.log('Read list of {n} individuals from {f}'.format(n=n, f=ind_file))
# read keep_indivs
if args.keep:
keep_indivs = __filter__(args.keep, 'individuals', 'include', array_indivs)
else:
keep_indivs = None
# read genotype array
log.log('Reading genotypes from {fname}'.format(fname=array_file))
geno_array = array_obj(array_file, n, array_snps, keep_snps=keep_snps,
keep_indivs=keep_indivs, mafMin=args.maf)
# filter annot_matrix down to only SNPs passing MAF cutoffs
if annot_matrix is not None:
annot_keep = geno_array.kept_snps
annot_matrix = annot_matrix[annot_keep,:]
# determine block widths
x = np.array((args.ld_wind_snps, args.ld_wind_kb, args.ld_wind_cm), dtype=bool)
if np.sum(x) != 1:
raise ValueError('Must specify exactly one --ld-wind option')
if args.ld_wind_snps:
max_dist = args.ld_wind_snps
coords = np.array(xrange(geno_array.m))
elif args.ld_wind_kb:
max_dist = args.ld_wind_kb*1000
coords = np.array(array_snps.df['BP'])[geno_array.kept_snps]
elif args.ld_wind_cm:
max_dist = args.ld_wind_cm
coords = np.array(array_snps.df['CM'])[geno_array.kept_snps]
block_left = ld.getBlockLefts(coords, max_dist)
if block_left[len(block_left)-1] == 0 and not args.yes_really:
        error_msg = 'Do you really want to compute whole-chromosome LD Score? If so, set the '
error_msg += '--yes-really flag (warning: it will use a lot of time / memory)'
raise ValueError(error_msg)
scale_suffix = ''
if args.pq_exp is not None:
log.log('Computing LD with pq ^ {S}.'.format(S=args.pq_exp))
msg = 'Note that LD Scores with pq raised to a nonzero power are'
        msg += ' not directly comparable to normal LD Scores.'
log.log(msg)
scale_suffix = '_S{S}'.format(S=args.pq_exp)
pq = np.matrix(geno_array.maf*(1-geno_array.maf)).reshape((geno_array.m, 1))
pq = np.power(pq, args.pq_exp)
if annot_matrix is not None:
annot_matrix = np.multiply(annot_matrix, pq)
else:
annot_matrix = pq
log.log("Estimating LD Score.")
# Adjust r2_min and r2_max thresholds.
# They must be vectors (e.g. [None] instead of None).
# Values of 0.0 and 1.0 must be replaced with None to avoid filtering.
    # Mathematically all r2 values are within [0, 1], but due to floating-point precision
    # some values may exceed 1 by a small amount, and we don't want them to be filtered.
if args.r2_min is None: args.r2_min = [0.0]
if args.r2_max is None: args.r2_max = [1.0]
args.r2_min = [None if __isclose__(x, 0.0) else x for x in args.r2_min]
args.r2_max = [None if __isclose__(x, 1.0) else x for x in args.r2_max]
if args.l2:
lN_vec = geno_array.ldScoreVarBlocks(block_left, args.chunk_size, annot=annot_matrix,
r2Min_vec=args.r2_min, r2Max_vec=args.r2_max,
unbiased=(not args.bias_r2))
col_prefix = "L2"; file_suffix = "l2"
elif args.l4:
lN_vec = geno_array.ldScoreVarBlocks_l4(block_left, args.chunk_size, annot=annot_matrix,
r2Min_vec=args.r2_min, r2Max_vec=args.r2_max)
col_prefix = "L4"; file_suffix = "l4"
else:
raise ValueError('Must specify --l2 or --l4 option')
if n_annot == 1:
ldscore_colnames = [col_prefix+scale_suffix]
else:
ldscore_colnames = [y+col_prefix+scale_suffix for y in annot_colnames]
# print .ldscore. Output columns: CHR, BP, RS, [LD Scores]
for lN_index, lN in enumerate(lN_vec):
r2_bin_suffix = '-r2bin-{i}'.format(i=(lN_index+1)) if len(lN_vec) > 1 else ''
out_fname = args.out + r2_bin_suffix + '.' + file_suffix + '.ldscore'
new_colnames = geno_array.colnames + ldscore_colnames
df = pd.DataFrame.from_records(np.c_[geno_array.df, lN])
df.columns = new_colnames
if args.print_snps:
if args.print_snps.endswith('gz'):
print_snps = pd.read_csv(args.print_snps, header=None, compression='gzip')
elif args.print_snps.endswith('bz2'):
print_snps = pd.read_csv(args.print_snps, header=None, compression='bz2')
else:
print_snps = pd.read_csv(args.print_snps, header=None)
if len(print_snps.columns) > 1:
                raise ValueError('--print-snps must refer to a file with one column of SNP IDs.')
log.log('Reading list of {N} SNPs for which to print LD Scores from {F}'.format(\
F=args.print_snps, N=len(print_snps)))
print_snps.columns=['SNP']
df = df.ix[df.SNP.isin(print_snps.SNP),:]
if len(df) == 0:
raise ValueError('After merging with --print-snps, no SNPs remain.')
else:
msg = 'After merging with --print-snps, LD Scores for {N} SNPs will be printed.'
log.log(msg.format(N=len(df)))
l2_suffix = '.gz'
log.log("Writing LD Scores for {N} SNPs to {f}.gz".format(f=out_fname, N=len(df)))
df.drop(['CM','MAF'], axis=1).to_csv(out_fname, sep="\t", header=True, index=False,
float_format='%.3f')
call(['gzip', '-f', out_fname])
if annot_matrix is not None:
M = np.atleast_1d(np.squeeze(np.asarray(np.sum(annot_matrix, axis=0))))
ii = geno_array.maf > 0.05
M_5_50 = np.atleast_1d(np.squeeze(np.asarray(np.sum(annot_matrix[ii,:], axis=0))))
else:
M = [geno_array.m]
M_5_50 = [np.sum(geno_array.maf > 0.05)]
# print .M
fout_M = open(args.out + '.'+ file_suffix +'.M','wb')
print >>fout_M, '\t'.join(map(str,M))
fout_M.close()
# print .M_5_50
fout_M_5_50 = open(args.out + '.'+ file_suffix +'.M_5_50','wb')
print >>fout_M_5_50, '\t'.join(map(str,M_5_50))
fout_M_5_50.close()
# print annot matrix
if (args.cts_bin is not None) and not args.no_print_annot:
out_fname_annot = args.out + '.annot'
new_colnames = geno_array.colnames + ldscore_colnames
annot_df = pd.DataFrame(np.c_[geno_array.df, annot_matrix])
annot_df.columns = new_colnames
del annot_df['MAF']
log.log("Writing annot matrix produced by --cts-bin to {F}".format(F=out_fname+'.gz'))
annot_df.to_csv(out_fname_annot, sep="\t", header=True, index=False)
call(['gzip', '-f', out_fname_annot])
# print LD Score summary
pd.set_option('display.max_rows', 200)
log.log('\nSummary of LD Scores in {F}'.format(F=out_fname+l2_suffix))
t = df.ix[:,4:].describe()
log.log( t.ix[1:,:] )
np.seterr(divide='ignore', invalid='ignore') # print NaN instead of weird errors
# print correlation matrix including all LD Scores and sample MAF
log.log('')
log.log('MAF/LD Score Correlation Matrix')
log.log( df.ix[:,4:].corr() )
# print condition number
if n_annot > 1: # condition number of a column vector w/ nonzero var is trivially one
log.log('\nLD Score Matrix Condition Number')
cond_num = np.linalg.cond(df.ix[:,5:])
log.log( reg.remove_brackets(str(np.matrix(cond_num))) )
if cond_num > 10000:
log.log('WARNING: ill-conditioned LD Score Matrix!')
# summarize annot matrix if there is one
if annot_matrix is not None:
# covariance matrix
x = pd.DataFrame(annot_matrix, columns=annot_colnames)
log.log('\nAnnotation Correlation Matrix')
log.log( x.corr() )
# column sums
log.log('\nAnnotation Matrix Column Sums')
log.log(_remove_dtype(x.sum(axis=0)))
# row sums
log.log('\nSummary of Annotation Matrix Row Sums')
row_sums = x.sum(axis=1).describe()
log.log(_remove_dtype(row_sums))
np.seterr(divide='raise', invalid='raise')
parser = argparse.ArgumentParser()
parser.add_argument('--out', default='ldsc', type=str,
help='Output filename prefix. If --out is not set, LDSC will use ldsc as the '
    'default output filename prefix.')
# Basic LD Score Estimation Flags'
parser.add_argument('--bfile', default=None, type=str,
help='Prefix for Plink .bed/.bim/.fam file')
parser.add_argument('--l2', default=False, action='store_true',
help='Estimate l2. Compatible with both jackknife and non-jackknife.')
# Filtering / Data Management for LD Score
parser.add_argument('--extract', default=None, type=str,
help='File with SNPs to include in LD Score estimation. '
'The file should contain one SNP ID per row.')
parser.add_argument('--keep', default=None, type=str,
help='File with individuals to include in LD Score estimation. '
'The file should contain one individual ID per row.')
parser.add_argument('--ld-wind-snps', default=None, type=int,
help='Specify the window size to be used for estimating LD Scores in units of '
'# of SNPs. You can only specify one --ld-wind-* option.')
parser.add_argument('--ld-wind-kb', default=None, type=float,
help='Specify the window size to be used for estimating LD Scores in units of '
'kilobase-pairs (kb). You can only specify one --ld-wind-* option.')
parser.add_argument('--ld-wind-cm', default=None, type=float,
help='Specify the window size to be used for estimating LD Scores in units of '
'centiMorgans (cM). You can only specify one --ld-wind-* option.')
parser.add_argument('--print-snps', default=None, type=str,
help='This flag tells LDSC to only print LD Scores for the SNPs listed '
'(one ID per row) in PRINT_SNPS. The sum r^2 will still include SNPs not in '
    'PRINT_SNPS. This is useful for reducing the number of LD Scores that have to be '
'read into memory when estimating h2 or rg.' )
# Fancy LD Score Estimation Flags
parser.add_argument('--annot', default=None, type=str,
help='Filename prefix for annotation file for partitioned LD Score estimation. '
'LDSC will automatically append .annot or .annot.gz to the filename prefix. '
'See docs/file_formats_ld for a definition of the .annot format.')
parser.add_argument('--thin-annot', action='store_true', default=False,
help='This flag says your annot files have only annotations, with no SNP, CM, CHR, BP columns.')
parser.add_argument('--cts-bin', default=None, type=str,
help='This flag tells LDSC to compute partitioned LD Scores, where the partition '
'is defined by cutting one or several continuous variable[s] into bins. '
'The argument to this flag should be the name of a single file or a comma-separated '
'list of files. The file format is two columns, with SNP IDs in the first column '
'and the continuous variable in the second column. ')
parser.add_argument('--cts-breaks', default=None, type=str,
help='Use this flag to specify names for the continuous variables cut into bins '
'with --cts-bin. For each continuous variable, specify breaks as a comma-separated '
'list of breakpoints, and separate the breakpoints for each variable with an x. '
'For example, if binning on MAF and distance to gene (in kb), '
'you might set --cts-breaks 0.1,0.25,0.4x10,100,1000 ')
parser.add_argument('--cts-names', default=None, type=str,
help='Use this flag to specify names for the continuous variables cut into bins '
'with --cts-bin. The argument to this flag should be a comma-separated list of '
'names. For example, if binning on DAF and distance to gene, you might set '
'--cts-bin DAF,DIST_TO_GENE ')
parser.add_argument('--per-allele', default=False, action='store_true',
help='Setting this flag causes LDSC to compute per-allele LD Scores, '
'i.e., \ell_j := \sum_k p_k(1-p_k)r^2_{jk}, where p_k denotes the MAF '
    'of SNP k. ')
parser.add_argument('--pq-exp', default=None, type=float,
help='Setting this flag causes LDSC to compute LD Scores with the given scale factor, '
'i.e., \ell_j := \sum_k (p_k(1-p_k))^a r^2_{jk}, where p_k denotes the MAF '
    'of SNP k and a is the argument to --pq-exp. ')
parser.add_argument('--no-print-annot', default=False, action='store_true',
    help='By default, setting --cts-bin or --cts-bin-add causes LDSC to print '
'the resulting annot matrix. Setting --no-print-annot tells LDSC not '
'to print the annot matrix. ')
parser.add_argument('--maf', default=None, type=float,
help='Minor allele frequency lower bound. Default is MAF > 0.')
parser.add_argument('--l4', default=False, action='store_true',
help='Estimate l4. Compatible with both jackknife and non-jackknife.')
parser.add_argument('--r2-min', default=None, type=float, nargs='+',
help='Lower bound (exclusive) of r2 to consider in ld score estimation. '
'Intended usage of this parameter is to create a binned histogram of l2 or l4 values. '
'For this reason --r2-min and --r2-max are always applied to biased estimates of allelic correlation, regardless of --bias-r2 flag, '
'to ensure that "--r2-min 0 --r2-max 1" cover the entire range of r2 values. '
'Can be used together with --l2 and --l4.')
parser.add_argument('--r2-max', default=None, type=float, nargs='+',
help='Upper bound (inclusive) of r2 to consider in ld score estimation. '
'See description of --r2-min option for additional details.')
parser.add_argument('--bias-r2', default=False, action='store_true',
help='Keep biased r2 estimates in ld score calculation (applies to --l2; incompatible with --l4; has no effect on --r2-min and --r2-max)')
# Basic Flags for Working with Variance Components
parser.add_argument('--h2', default=None, type=str,
help='Filename for a .sumstats[.gz] file for one-phenotype LD Score regression. '
'--h2 requires at minimum also setting the --ref-ld and --w-ld flags.')
parser.add_argument('--h2-cts', default=None, type=str,
help='Filename for a .sumstats[.gz] file for cell-type-specific analysis. '
'--h2-cts requires the --ref-ld-chr, --w-ld, and --ref-ld-chr-cts flags.')
parser.add_argument('--rg', default=None, type=str,
    help='Comma-separated list of prefixes of .chisq files for genetic correlation estimation.')
parser.add_argument('--ref-ld', default=None, type=str,
help='Use --ref-ld to tell LDSC which LD Scores to use as the predictors in the LD '
'Score regression. '
'LDSC will automatically append .l2.ldscore/.l2.ldscore.gz to the filename prefix.')
parser.add_argument('--ref-ld-chr', default=None, type=str,
help='Same as --ref-ld, but will automatically concatenate .l2.ldscore files split '
'across 22 chromosomes. LDSC will automatically append .l2.ldscore/.l2.ldscore.gz '
'to the filename prefix. If the filename prefix contains the symbol @, LDSC will '
'replace the @ symbol with chromosome numbers. Otherwise, LDSC will append chromosome '
    'numbers to the end of the filename prefix. '
    'Example 1: --ref-ld-chr ld/ will read ld/1.l2.ldscore.gz ... ld/22.l2.ldscore.gz '
'Example 2: --ref-ld-chr ld/@_kg will read ld/1_kg.l2.ldscore.gz ... ld/22_kg.l2.ldscore.gz')
parser.add_argument('--w-ld', default=None, type=str,
help='Filename prefix for file with LD Scores with sum r^2 taken over SNPs included '
'in the regression. LDSC will automatically append .l2.ldscore/.l2.ldscore.gz.')
parser.add_argument('--w-ld-chr', default=None, type=str,
help='Same as --w-ld, but will read files split into 22 chromosomes in the same '
'manner as --ref-ld-chr.')
parser.add_argument('--overlap-annot', default=False, action='store_true',
    help='This flag informs LDSC that the partitioned LD Scores were generated using an '
'annot matrix with overlapping categories (i.e., not all row sums equal 1), '
'and prevents LDSC from displaying output that is meaningless with overlapping categories.')
parser.add_argument('--print-coefficients',default=False,action='store_true',
help='when categories are overlapping, print coefficients as well as heritabilities.')
parser.add_argument('--frqfile', type=str,
help='For use with --overlap-annot. Provides allele frequencies to prune to common '
'snps if --not-M-5-50 is not set.')
parser.add_argument('--frqfile-chr', type=str,
help='Prefix for --frqfile files split over chromosome.')
parser.add_argument('--no-intercept', action='store_true',
help = 'If used with --h2, this constrains the LD Score regression intercept to equal '
'1. If used with --rg, this constrains the LD Score regression intercepts for the h2 '
'estimates to be one and the intercept for the genetic covariance estimate to be zero.')
parser.add_argument('--intercept-h2', action='store', default=None,
help = 'Intercepts for constrained-intercept single-trait LD Score regression.')
parser.add_argument('--intercept-gencov', action='store', default=None,
help = 'Intercepts for constrained-intercept cross-trait LD Score regression.'
' Must have same length as --rg. The first entry is ignored.')
parser.add_argument('--M', default=None, type=str,
help='# of SNPs (if you don\'t want to use the .l2.M files that came with your .l2.ldscore.gz files)')
parser.add_argument('--two-step', default=None, type=float,
help='Test statistic bound for use with the two-step estimator. Not compatible with --no-intercept and --constrain-intercept.')
parser.add_argument('--chisq-max', default=None, type=float,
help='Max chi^2.')
parser.add_argument('--ref-ld-chr-cts', default=None, type=str,
help='Name of a file that has a list of file name prefixes for cell-type-specific analysis.')
parser.add_argument('--print-all-cts', action='store_true', default=False)
# Flags for both LD Score estimation and h2/gencor estimation
parser.add_argument('--print-cov', default=False, action='store_true',
help='For use with --h2/--rg. This flag tells LDSC to print the '
    'covariance matrix of the estimates.')
parser.add_argument('--print-delete-vals', default=False, action='store_true',
help='If this flag is set, LDSC will print the block jackknife delete-values ('
    'i.e., the regression coefficients estimated from the data with a block removed). '
'The delete-values are formatted as a matrix with (# of jackknife blocks) rows and '
'(# of LD Scores) columns.')
# Flags you should almost never use
parser.add_argument('--chunk-size', default=50, type=int,
help='Chunk size for LD Score calculation. Use the default.')
parser.add_argument('--pickle', default=False, action='store_true',
help='Store .l2.ldscore files as pickles instead of gzipped tab-delimited text.')
parser.add_argument('--yes-really', default=False, action='store_true',
help='Yes, I really want to compute whole-chromosome LD Score.')
parser.add_argument('--invert-anyway', default=False, action='store_true',
help="Force LDSC to attempt to invert ill-conditioned matrices.")
parser.add_argument('--n-blocks', default=200, type=int,
help='Number of block jackknife blocks.')
parser.add_argument('--not-M-5-50', default=False, action='store_true',
help='This flag tells LDSC to use the .l2.M file instead of the .l2.M_5_50 file.')
parser.add_argument('--return-silly-things', default=False, action='store_true',
help='Force ldsc to return silly genetic correlation estimates.')
parser.add_argument('--no-check-alleles', default=False, action='store_true',
help='For rg estimation, skip checking whether the alleles match. This check is '
'redundant for pairs of chisq files generated using munge_sumstats.py and the '
'same argument to the --merge-alleles flag.')
# transform to liability scale
parser.add_argument('--samp-prev',default=None,
help='Sample prevalence of binary phenotype (for conversion to liability scale).')
parser.add_argument('--pop-prev',default=None,
help='Population prevalence of binary phenotype (for conversion to liability scale).')
if __name__ == '__main__':
args = parser.parse_args()
if args.out is None:
raise ValueError('--out is required.')
log = Logger(args.out+'.log')
try:
defaults = vars(parser.parse_args(''))
opts = vars(args)
non_defaults = [x for x in opts.keys() if opts[x] != defaults[x]]
header = MASTHEAD
header += "Call: \n"
header += './ldsc.py \\\n'
options = ['--'+x.replace('_','-')+' '+str(opts[x])+' \\' for x in non_defaults]
header += '\n'.join(options).replace('True','').replace('False','')
header = header[0:-1]+'\n'
log.log(header)
log.log('Beginning analysis at {T}'.format(T=time.ctime()))
start_time = time.time()
if args.n_blocks <= 1:
raise ValueError('--n-blocks must be an integer > 1.')
if args.bfile is not None:
if args.l2 is False and args.l4 is False:
raise ValueError('Must specify --l2 or --l4 with --bfile.')
if (args.l4 is True) and (args.bias_r2 is False):
raise ValueError('--l4 must be used together with --bias-r2. Unbiased --l4 calculation is not implemented.')
if args.annot is not None and args.extract is not None:
raise ValueError('--annot and --extract are currently incompatible.')
if args.cts_bin is not None and args.extract is not None:
raise ValueError('--cts-bin and --extract are currently incompatible.')
if args.annot is not None and args.cts_bin is not None:
raise ValueError('--annot and --cts-bin are currently incompatible.')
if (args.cts_bin is not None) != (args.cts_breaks is not None):
raise ValueError('Must set both or neither of --cts-bin and --cts-breaks.')
if args.per_allele and args.pq_exp is not None:
raise ValueError('Cannot set both --per-allele and --pq-exp (--per-allele is equivalent to --pq-exp 1).')
if args.per_allele:
args.pq_exp = 1
if args.l4 and (args.pq_exp is not None):
args.pq_exp *= 2
ldscore(args, log)
# summary statistics
elif (args.h2 or args.rg or args.h2_cts) and (args.ref_ld or args.ref_ld_chr) and (args.w_ld or args.w_ld_chr):
if args.h2 is not None and args.rg is not None:
raise ValueError('Cannot set both --h2 and --rg.')
if args.ref_ld and args.ref_ld_chr:
raise ValueError('Cannot set both --ref-ld and --ref-ld-chr.')
if args.w_ld and args.w_ld_chr:
raise ValueError('Cannot set both --w-ld and --w-ld-chr.')
if (args.samp_prev is not None) != (args.pop_prev is not None):
raise ValueError('Must set both or neither of --samp-prev and --pop-prev.')
if not args.overlap_annot or args.not_M_5_50:
if args.frqfile is not None or args.frqfile_chr is not None:
log.log('The frequency file is unnecessary and is being ignored.')
args.frqfile = None
args.frqfile_chr = None
if args.overlap_annot and not args.not_M_5_50:
if not ((args.frqfile and args.ref_ld) or (args.frqfile_chr and args.ref_ld_chr)):
raise ValueError('Must set either --frqfile and --ref-ld or --frqfile-chr and --ref-ld-chr')
if args.rg:
sumstats.estimate_rg(args, log)
elif args.h2:
sumstats.estimate_h2(args, log)
elif args.h2_cts:
sumstats.cell_type_specific(args, log)
# bad flags
else:
print header
print 'Error: no analysis selected.'
print 'ldsc.py -h describes options.'
except Exception:
ex_type, ex, tb = sys.exc_info()
log.log( traceback.format_exc(ex) )
raise
finally:
log.log('Analysis finished at {T}'.format(T=time.ctime()) )
time_elapsed = round(time.time()-start_time,2)
log.log('Total time elapsed: {T}'.format(T=sec_to_str(time_elapsed)))
| gpl-3.0 |
APMonitor/arduino | 5_Moving_Horizon_Estimation/1st_order_linear/Python/main_mhe.py | 1 | 3157 | import tclab
import numpy as np
import time
from APMonitor.apm import *
import matplotlib.pyplot as plt
# Connect to Arduino
a = tclab.TCLab()
# Run time in minutes
run_time = 10.0
# Number of cycles (1 cycle per 2 seconds)
loops = int(30.0*run_time)
# Temperature (degC)
T1 = np.ones(loops) * a.T1    # measured temperature
T1mhe = np.ones(loops) * a.T1 # MHE temperature estimate
Tsp1 = np.ones(loops) * a.T1  # temperature set point
Kp = np.ones(loops) * 0.3598
tau = np.ones(loops) * 47.73
TC_ss = np.ones(loops) * 23
# heater outputs (0-100%)
Q1 = np.ones(loops) * 0.0
Q2 = np.ones(loops) * 0.0
Q1[6:] = 100.0
Q1[50:] = 20.0
Q1[100:] = 80.0
Q1[150:] = 10.0
Q1[200:] = 95.0
Q1[250:] = 0.0
# time
tm = np.zeros(loops)
# moving horizon estimation
from mhe import *
s = 'http://byu.apmonitor.com'
#s = 'http://127.0.0.1'
b = 'mhe'
mhe_init()
# Main Loop
start_time = time.time()
prev_time = start_time
try:
for i in range(loops-1):
# Sleep time
sleep_max = 2.0
sleep = sleep_max - (time.time() - prev_time)
if sleep>=0.01:
time.sleep(sleep)
else:
time.sleep(0.01)
# Record time and change in time
t = time.time()
tm[i] = t - start_time
dt = t - prev_time
prev_time = t
# Read temperature in degC
T1[i] = a.T1
# MHE - match model to measurements
# run MHE every 2 seconds (see mhe.csv)
params = mhe(T1[i],Q1[i-1])
Kp[i] = params[0]
tau[i] = params[1]
TC_ss[i] = params[2]
T1mhe[i] = params[3]
# Write output (0-100%)
Q1[i] = min(100.0,max(0.0,Q1[i]))
a.Q1(Q1[i])
# Plot
plt.clf()
ax=plt.subplot(4,1,1)
ax.grid()
        plt.plot(tm[0:i],T1[0:i],'ro',markersize=3,label=r'$T_1$ measured')
        plt.plot(tm[0:i],T1mhe[0:i],'bx',markersize=3,label=r'$T_1$ MHE')
plt.ylabel('Temperature (degC)')
plt.legend(loc='best')
ax=plt.subplot(4,1,2)
ax.grid()
        plt.plot(tm[0:i],Q1[0:i],'r-',markersize=3,label=r'$Q_1$')
        plt.plot(tm[0:i],Q2[0:i],'b:',markersize=3,label=r'$Q_2$')
plt.ylabel('Heaters')
plt.legend(loc='best')
ax=plt.subplot(4,1,3)
ax.grid()
        plt.plot(tm[0:i],Kp[0:i],'ro',markersize=3,label=r'$K_p$')
plt.ylabel('Parameters')
plt.legend(loc='best')
ax=plt.subplot(4,1,4)
ax.grid()
        plt.plot(tm[0:i],tau[0:i],'k^',markersize=3,label=r'$\tau$')
        plt.plot(tm[0:i],TC_ss[0:i],'gs',markersize=3,label=r'$TC_{ss}$')
plt.ylabel('Parameters')
plt.legend(loc='best')
plt.xlabel('Time (sec)')
plt.draw()
plt.pause(0.05)
# Open Web Interface
if i==5:
apm_web(s,b)
# Allow user to end loop with Ctrl-C
except KeyboardInterrupt:
# Disconnect from Arduino
a.Q1(0)
a.Q2(0)
print('Shutting down')
a.close()
# Make sure serial connection still closes when there's an error
except:
# Disconnect from Arduino
a.Q1(0)
a.Q2(0)
print('Error: Shutting down')
a.close()
raise
| apache-2.0 |
tombstone/models | research/autoencoder/MaskingNoiseAutoencoderRunner.py | 8 | 1644 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from autoencoder_models.DenoisingAutoencoder import MaskingNoiseAutoencoder
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def standard_scale(X_train, X_test):
preprocessor = prep.StandardScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 100
batch_size = 128
display_step = 1
autoencoder = MaskingNoiseAutoencoder(
n_input=784,
n_hidden=200,
transfer_function=tf.nn.softplus,
optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
dropout_probability=0.95)
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
cost = autoencoder.partial_fit(batch_xs)
avg_cost += cost / n_samples * batch_size
if epoch % display_step == 0:
print("Epoch:", '%d,' % (epoch + 1),
"Cost:", "{:.9f}".format(avg_cost))
print("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
| apache-2.0 |
vshtanko/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 253 | 4158 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
PrashntS/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
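    Examples
    --------
    A minimal usage sketch on random toy data; the parameter values below are
    purely illustrative.
    >>> import numpy as np
    >>> from sklearn.kernel_approximation import RBFSampler
    >>> X = np.random.RandomState(0).rand(10, 4)
    >>> rbf_feature = RBFSampler(gamma=1.0, n_components=50, random_state=0)
    >>> X_features = rbf_feature.fit_transform(X)
    >>> X_features.shape
    (10, 50)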
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
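    Examples
    --------
    A minimal usage sketch on random non-negative toy data; the parameter
    values are illustrative only.
    >>> import numpy as np
    >>> from sklearn.kernel_approximation import SkewedChi2Sampler
    >>> X = np.random.RandomState(0).rand(10, 4)
    >>> chi2_feature = SkewedChi2Sampler(skewedness=0.01, n_components=50,
    ...                                  random_state=0)
    >>> X_features = chi2_feature.fit_transform(X)
    >>> X_features.shape
    (10, 50)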
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Uses sampling of the Fourier transform of the kernel characteristic
    at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
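    Examples
    --------
    A minimal usage sketch on random non-negative toy data; with
    sample_steps=2 each input feature maps to 2*2-1 = 3 output features.
    >>> import numpy as np
    >>> from sklearn.kernel_approximation import AdditiveChi2Sampler
    >>> X = np.random.RandomState(0).rand(10, 4)
    >>> chi2sampler = AdditiveChi2Sampler(sample_steps=2)
    >>> X_transformed = chi2sampler.fit_transform(X)
    >>> X_transformed.shape
    (10, 12)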
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
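    Examples
    --------
    A minimal usage sketch on random toy data; the kernel parameters are
    illustrative only.
    >>> import numpy as np
    >>> from sklearn.kernel_approximation import Nystroem
    >>> X = np.random.RandomState(0).rand(10, 4)
    >>> feature_map = Nystroem(kernel='rbf', gamma=0.2, n_components=5,
    ...                        random_state=0)
    >>> X_transformed = feature_map.fit_transform(X)
    >>> X_transformed.shape
    (10, 5)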
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
| bsd-3-clause |
tswast/google-cloud-python | firestore/docs/conf.py | 2 | 11885 | # -*- coding: utf-8 -*-
#
# google-cloud-firestore documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.6.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Allow markdown includes (so releases.md can include CHANGELOG.md)
# http://www.sphinx-doc.org/en/master/markdown.html
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-firestore"
copyright = u"2017, Google"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for Python",
"github_user": "googleapis",
"github_repo": "google-cloud-python",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-firestore-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-firestore.tex",
u"google-cloud-firestore Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-firestore",
u"google-cloud-firestore Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-firestore",
u"google-cloud-firestore Documentation",
author,
"google-cloud-firestore",
"GAPIC library for the {metadata.shortName}",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest", None),
"grpc": ("https://grpc.io/grpc/python/", None),
"requests": ("https://requests.kennethreitz.org/en/stable/", None),
"fastavro": ("https://fastavro.readthedocs.io/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
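
# Illustrative note (added): with a configuration like the one above, the HTML
# documentation is typically built from the docs/ directory with something
# along the lines of
#   sphinx-build -b html . _build/html
# The exact invocation (nox/CI session, output directory) depends on the
# project's tooling and is not defined by this file.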
| apache-2.0 |
dlodato/AliPhysics | PWGPP/FieldParam/fitsol.py | 39 | 8343 | #!/usr/bin/env python
debug = True # enable trace
def trace(x):
global debug
if debug: print(x)
trace("loading...")
from itertools import combinations, combinations_with_replacement
from glob import glob
from math import *
import operator
from os.path import basename
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
import sklearn.feature_selection
import datetime
from functools import reduce  # reduce is not a builtin in Python 3 (needed by product() below)
def prec_from_pathname(path):
if '2k' in path: return 0.002
elif '5k' in path: return 0.005
    else: raise AssertionError('Unknown field strength: %s' % path)
# ['x', 'y', 'z', 'xx', 'xy', 'xz', 'yy', ...]
def combinatrial_vars(vars_str='xyz', length=3):
term_list = []
for l in range(length):
term_list.extend([''.join(v) for v in combinations_with_replacement(list(vars_str), 1 + l)])
return term_list
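# Illustrative note (added): for a small input the generated term list is e.g.
#   combinatrial_vars('xy', 2) -> ['x', 'y', 'xx', 'xy', 'yy']
# i.e. all monomials of the given variables up to the requested power.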
# product :: Num a => [a] -> a
def product(xs):
return reduce(operator.mul, xs, 1) # foldl in Haskell
# (XYZ, "xx") -> XX
def term(dataframe, vars_str):
return product(map(lambda x: dataframe[x], list(vars_str)))
# (f(X), Y) -> (max deviation, max%, avg dev, avg%)
def deviation_stat(fX, Y, prec=0.005):
dev = np.abs(fX - Y)
(max_dev, avg_dev) = (dev.max(axis=0), dev.mean(axis=0))
(max_pct, avg_pct) = (max_dev / prec * 100, avg_dev / prec * 100)
return (max_dev, max_pct, avg_dev, avg_pct)
# IO Df
def load_samples(path, cylindrical_axis=True, absolute_axis=True, genvars=[]):
sample_cols = ['x', 'y', 'z', 'Bx', 'By', 'Bz']
df = pd.read_csv(path, sep=' ', names=sample_cols)
if cylindrical_axis:
df['r'] = np.sqrt(df.x**2 + df.y**2)
df['p'] = np.arctan2(df.y, df.x)
df['Bt'] = np.sqrt(df.Bx**2 + df.By**2)
df['Bpsi'] = np.arctan2(df.By, df.Bx) - np.arctan2(df.y, df.x)
df['Br'] = df.Bt * np.cos(df.Bpsi)
df['Bp'] = df.Bt * np.sin(df.Bpsi)
if absolute_axis:
df['X'] = np.abs(df.x)
df['Y'] = np.abs(df.y)
df['Z'] = np.abs(df.z)
for var in genvars:
df[var] = term(df, var)
return df
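# Note (added for clarity): the cylindrical columns computed above decompose
# the transverse field Bt into a radial part Br = Bt*cos(Bpsi) and an
# azimuthal part Bp = Bt*sin(Bpsi), where Bpsi is the angle between the field
# vector and the position vector in the x-y plane.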
def choose(vars, df1, df2):
X1 = df1.loc[:, vars].as_matrix()
X2 = df2.loc[:, vars].as_matrix()
return (X1, X2)
# IO ()
def run_analysis_for_all_fields():
sample_set = glob("dat_z22/*2k*.sample.dat")
test_set = glob("dat_z22/*2k*.test.dat")
#print(sample_set, test_set)
assert(len(sample_set) == len(test_set) and len(sample_set) > 0)
result = pd.DataFrame()
for i, sample_file in enumerate(sample_set):
trace("run_analysis('%s', '%s')" % (sample_file, test_set[i]))
df = run_analysis(sample_file, test_set[i])
result = result.append(df, ignore_index=True)
write_header(result)
def run_analysis(sample_file = 'dat_z22/tpc2k-z0-q2.sample.dat',
test_file = 'dat_z22/tpc2k-z0-q2.test.dat'):
global precision, df, test, lr, la, xvars_full, xvars, yvars, X, Y, Xtest, Ytest, ana_result
precision = prec_from_pathname(sample_file)
assert(precision == prec_from_pathname(test_file))
xvars_full = combinatrial_vars('xyz', 3)[3:] # variables except x, y, z upto 3 dims
trace("reading training samples... " + sample_file)
df = load_samples(sample_file, genvars=xvars_full)
trace("reading test samples..." + test_file)
test = load_samples(test_file, genvars=xvars_full)
trace("linear regression fit...")
lr = sklearn.linear_model.LinearRegression()
#ri = sklearn.linear_model.RidgeCV()
#la = sklearn.linear_model.LassoCV()
fs = sklearn.feature_selection.RFE(lr, 1, verbose=0)
#xvars = ['x','y','z','xx','yy','zz','xy','yz','xz','xzz','yzz']
#xvars = ["xx", "yy", "zz", 'x', 'y', 'z', 'xzz', 'yzz']
#xvars = ['xxxr', 'xrrX', 'zzrX', 'p', 'xyrr', 'xzzr', 'xrrY', 'xzrX', 'xxxz', 'xzzr']
#xvars=['x', 'xzz', 'xyz', 'yz', 'yy', 'zz', 'xy', 'xx', 'z', 'y', 'xz', 'yzz']
yvars = ['Bx', 'By', 'Bz']
#yvars = ['Bz']
(Y, Ytest) = choose(yvars, df, test)
#(Y, Ytest) = (df['Bz'], test['Bz'])
xvars = combinatrial_vars('xyz', 3) # use all terms upto 3rd power
(X, Xtest) = choose(xvars, df, test)
for y in yvars:
fs.fit(X, df[y])
res = pd.DataFrame({ "term": xvars, "rank": fs.ranking_ })
trace(y)
trace(res.sort_values(by = "rank"))
#xvars=list(res.sort_values(by="rank")[:26]['term'])
lr.fit(X, Y)
trace(', '.join(yvars) + " = 1 + " + ' + '.join(xvars))
test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#for i in range(len(yvars)):
# arr = [lr.intercept_[i]] + lr.coef_[i]
# arr = [ str(x) for x in arr ]
# print(yvars[i] + " = { " + ', '.join(arr) + " }")
# print("deviation stat [test]: max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
# ( test_dev[0][i], test_dev[1][i], test_dev[2][i], test_dev[3][i] ))
(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
trace("linear regression R^2 [train data]: %.8f" % sample_score)
trace("linear regression R^2 [test data] : %.8f" % test_score)
return pd.DataFrame(
{ "xvars": [xvars],
"yvars": [yvars],
"max_dev": [test_dev[0]],
"max%": [test_dev[1]],
"avg_dev": [test_dev[2]],
"avg%": [test_dev[3]],
"sample_score": [sample_score],
"score": [test_score],
"coeffs": [lr.coef_],
"intercept": [lr.intercept_],
"sample_file": [sample_file],
"test_file": [test_file],
"precision": [precision],
"volume_id": [volume_id_from_path(sample_file)]
})
def volume_id_from_path(path):
return basename(path)\
.replace('.sample.dat', '')\
.replace('-', '_')
def get_location_by_volume_id(id):
if 'its' in id: r_bin = 0
if 'tpc' in id: r_bin = 1
if 'tof' in id: r_bin = 2
if 'tofext' in id: r_bin = 3
if 'cal' in id: r_bin = 4
z_bin = int(id.split('_')[1][1:]) # "tofext2k_z0_q4" -> 0
if 'q1' in id: quadrant = 0
if 'q2' in id: quadrant = 1
if 'q3' in id: quadrant = 2
if 'q4' in id: quadrant = 3
return r_bin, z_bin, quadrant
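# Illustrative example (added, assuming the naming convention handled above):
#   get_location_by_volume_id('tpc2k_z0_q2') -> (1, 0, 1)
# i.e. radial bin 1 (TPC), z bin 0, second phi quadrant.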
def write_header(result):
#result.to_csv("magfield_params.csv")
#result.to_html("magfield_params.html")
print("# This file was generated from sysid.py at " + str(datetime.datetime.today()))
print("# " + ', '.join(result.iloc[0].yvars) + " = 1 + " + ' + '.join(result.iloc[0].xvars))
print("# barrel r: 0 < its < 80 < tpc < 250 < tof < 400 < tofext < 423 < cal < 500")
print("# barrel z: -550 < z < 550")
print("# phi: 0 < q1 < 0.5pi < q2 < pi < q3 < 1.5pi < q4 < 2pi")
print("# header: Rbin Zbin Quadrant Nval_per_compoment(=20)")
print("# data: Nval_per_compoment x floats")
#print("# R^2: coefficient of determination in multiple linear regression. [0,1]")
print("")
for index, row in result.iterrows():
#print("// ** %s - R^2 %s" % (row.volume_id, row.score))
print("#" + row.volume_id)
r_bin, z_bin, quadrant = get_location_by_volume_id(row.volume_id)
print("%s %s %s 20" % (r_bin, z_bin, quadrant))
for i, yvar in enumerate(row.yvars):
name = row.volume_id #+ '_' + yvar.lower()
print("# precision: tgt %.2e max %.2e (%.1f%%) avg %.2e (%.1f%%)" %
(row['precision'], row['max_dev'][i], row['max%'][i], row['avg_dev'][i], row['avg%'][i]))
coef = [row['intercept'][i]] + list(row['coeffs'][i])
arr = [ "%.5e" % x for x in coef ]
body = ' '.join(arr)
#decl = "const double[] %s = { %s };\n" % (name, body)
#print(decl)
print(body)
print("")
#write_header(run_analysis())
run_analysis_for_all_fields()
#for i in range(10):
# for xvars in combinations(xvars_full, i+1):
#(X, Xtest) = choose(xvars, df, test)
#lr.fit(X, Y)
#ri.fit(X, Y)
#la.fit(X, Y)
#fs.fit(X, Y)
#print xvars
#(sample_score, test_score) = (lr.score(X, Y), lr.score(Xtest, Ytest))
#print("linear R^2[sample] %.8f" % sample_score)
#print("linear R^2[test] %.8f" % test_score)
#(sample_score2, test_score2) = (la.score(X, Y), la.score(Xtest, Ytest))
#print("lasso R^2[sample] %.8f" % sample_score2)
#print("lasso R^2[test] %.8f" % test_score2)
#print(la.coef_)
#for i in range(len(yvars)):
# print(yvars[i])
# print(pd.DataFrame({"Name": xvars, "Params": lr.coef_[i]}).sort_values(by='Params'))
# print("+ %e" % lr.intercept_[i])
#sample_dev = deviation_stat(lr.predict(X), Y, prec=precision)
#test_dev = deviation_stat(lr.predict(Xtest), Ytest, prec=precision)
#test_dev2 = deviation_stat(la.predict(Xtest), Ytest, prec=precision)
#print("[sample] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % sample_dev)
#print("[test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev )
#print("lasso [test] max %.2e (%.1f%%) avg %.2e (%.1f%%)" % test_dev2 )
| bsd-3-clause |
kirichoi/tellurium | tellurium/sedml/tesedml.py | 1 | 82282 | # -*- coding: utf-8 -*-
"""
Tellurium SED-ML support.
This module implements SED-ML support for tellurium.
----------------
Overview SED-ML
----------------
SED-ML is built from these main classes:
the Model Class,
the Simulation Class,
the Task Class,
the DataGenerator Class,
and the Output Class.
The Model Class
The Model class is used to reference the models used in the simulation experiment.
SED-ML itself is independent of the model encoding underlying the models. The only
requirement is that the model needs to be referenced by using an unambiguous identifier
which allows for finding it, for example using a MIRIAM URI. To specify the language in
which the model is encoded, a set of predefined language URNs is provided.
The SED-ML Change class allows the application of changes to the referenced models,
including changes on the XML attributes, e.g. changing the value of an observable,
computing the change of a value using mathematics, or general changes on any XML element
of the model representation that is addressable by XPath expressions, e.g. substituting
a piece of XML by an updated one.
TODO: DATA CLASS
The Simulation Class
The Simulation class defines the simulation settings and the steps taken during simulation.
These include the particular type of simulation and the algorithm used for the execution of
the simulation; preferably an unambiguous reference to such an algorithm should be given,
using a controlled vocabulary, or ontologies. One example for an ontology of simulation
algorithms is the Kinetic Simulation Algorithm Ontology KiSAO. Further information encodable
in the Simulation class includes the step size, simulation duration, and other
simulation-type dependent information.
The Task Class
SED-ML makes use of the notion of a Task class to combine a defined model (from the Model class)
and a defined simulation setting (from the Simulation class). A task always holds one reference each.
To refer to a specific model and to a specific simulation, the corresponding IDs are used.
The DataGenerator Class
The raw simulation result sometimes does not correspond to the desired output of the simulation,
e.g. one might want to normalise a plot before output, or apply post-processing like mean-value calculation.
The DataGenerator class allows for the encoding of such post-processings which need to be applied to the
simulation result before output. To define data generators, any addressable variable or parameter
of any defined model (from instances of the Model class) may be referenced, and new entities might
be specified using MathML definitions.
The Output Class
The Output class defines the output of the simulation, in the sense that it specifies what shall be
plotted in the output. To do so, an output type is defined, e.g. 2D-plot, 3D-plot or data table,
and the according axes or columns are all assigned to one of the formerly specified instances
of the DataGenerator class.
For information about SED-ML please refer to http://www.sed-ml.org/
and the SED-ML specification.
------------------------------------
SED-ML in tellurium: Implementation
------------------------------------
SED-ML support in tellurium is based on Combine Archives.
The SED-ML files in the Archive can be executed and stored with results.
----------------------------------------
SED-ML in tellurium: Supported Features
----------------------------------------
Tellurium supports SED-ML L1V3 with SBML as model format.
SBML models are fully supported, whereas for CellML models only basic support
is implemented (additional support will be implemented when requested).
CellML models are transformed to SBML models, which results in different XPath expressions,
so that targets and selections cannot easily be resolved in the CellML-derived SBML.
Supported input for SED-ML are either SED-ML files ('.sedml' extension),
SED-ML XML strings or combine archives ('.sedx'|'.omex' extension).
Executable python code is generated from the SED-ML which allows the
execution of the defined simulation experiment.
In the current implementation all SED-ML constructs, with the exception of the
XML transformation changes of the model
- Change.RemoveXML
- Change.AddXML
- Change.ChangeXML
are supported.
-------
Notice
-------
The main maintainer for SED-ML support is Matthias König.
Please have changes to this file reviewed and make sure that all SED-ML related tests are working.
"""
from __future__ import absolute_import, print_function, division
import sys
import platform
import tempfile
import shutil
import traceback
import os.path
import warnings
import datetime
import zipfile
import re
import numpy as np
from collections import namedtuple
import jinja2
try:
import tesedml as libsedml
except ImportError:
import libsedml
from tellurium.utils import omex
from .mathml import evaluableMathML
import tellurium as te
try:
# required imports in generated python code
import pandas
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
except ImportError:
warnings.warn("Dependencies for SEDML code execution not fulfilled.")
print(traceback.format_exc())
######################################################################################################################
# KISAO MAPPINGS
######################################################################################################################
KISAOS_CVODE = [ # 'cvode'
'KISAO:0000019', # CVODE
'KISAO:0000433', # CVODE-like method
'KISAO:0000407',
'KISAO:0000099',
'KISAO:0000035',
'KISAO:0000071',
"KISAO:0000288", # "BDF" cvode, stiff=true
"KISAO:0000280", # "Adams-Moulton" cvode, stiff=false
]
KISAOS_RK4 = [ # 'rk4'
'KISAO:0000032', # RK4 explicit fourth-order Runge-Kutta method
'KISAO:0000064', # Runge-Kutta based method
]
KISAOS_RK45 = [ # 'rk45'
'KISAO:0000086', # RKF45 embedded Runge-Kutta-Fehlberg 5(4) method
]
KISAOS_LSODA = [ # 'lsoda'
'KISAO:0000088', # roadrunner doesn't have an lsoda solver so use cvode
]
KISAOS_GILLESPIE = [ # 'gillespie'
'KISAO:0000241', # Gillespie-like method
'KISAO:0000029',
'KISAO:0000319',
'KISAO:0000274',
'KISAO:0000333',
'KISAO:0000329',
'KISAO:0000323',
'KISAO:0000331',
'KISAO:0000027',
'KISAO:0000082',
'KISAO:0000324',
'KISAO:0000350',
'KISAO:0000330',
'KISAO:0000028',
'KISAO:0000038',
'KISAO:0000039',
'KISAO:0000048',
'KISAO:0000074',
'KISAO:0000081',
'KISAO:0000045',
'KISAO:0000351',
'KISAO:0000084',
'KISAO:0000040',
'KISAO:0000046',
'KISAO:0000003',
'KISAO:0000051',
'KISAO:0000335',
'KISAO:0000336',
'KISAO:0000095',
'KISAO:0000022',
'KISAO:0000076',
'KISAO:0000015',
'KISAO:0000075',
'KISAO:0000278',
]
KISAOS_NLEQ = [ # 'nleq'
'KISAO:0000099',
'KISAO:0000274',
'KISAO:0000282',
'KISAO:0000283',
'KISAO:0000355',
'KISAO:0000356',
'KISAO:0000407',
'KISAO:0000408',
'KISAO:0000409',
'KISAO:0000410',
'KISAO:0000411',
'KISAO:0000412',
'KISAO:0000413',
'KISAO:0000432',
'KISAO:0000437',
]
# allowed algorithms for simulation type
KISAOS_STEADYSTATE = KISAOS_NLEQ
KISAOS_UNIFORMTIMECOURSE = KISAOS_CVODE + KISAOS_RK4 + KISAOS_RK45 + KISAOS_GILLESPIE + KISAOS_LSODA
KISAOS_ONESTEP = KISAOS_UNIFORMTIMECOURSE
# supported algorithm parameters
KISAOS_ALGORITHMPARAMETERS = {
'KISAO:0000209': ('relative_tolerance', float), # the relative tolerance
'KISAO:0000211': ('absolute_tolerance', float), # the absolute tolerance
'KISAO:0000220': ('maximum_bdf_order', int), # the maximum BDF (stiff) order
'KISAO:0000219': ('maximum_adams_order', int), # the maximum Adams (non-stiff) order
'KISAO:0000415': ('maximum_num_steps', int), # the maximum number of steps that can be taken before exiting
'KISAO:0000467': ('maximum_time_step', float), # the maximum time step that can be taken
'KISAO:0000485': ('minimum_time_step', float), # the minimum time step that can be taken
'KISAO:0000332': ('initial_time_step', float), # the initial value of the time step for algorithms that change this value
    'KISAO:0000107': ('variable_step_size', bool),  # whether the algorithm proceeds with an adaptive step size
'KISAO:0000486': ('maximum_iterations', int), # [nleq] the maximum number of iterations the algorithm should take before exiting
'KISAO:0000487': ('minimum_damping', float), # [nleq] minimum damping value
'KISAO:0000488': ('seed', int), # the seed for stochastic runs of the algorithm
}
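# Illustrative note (added): an algorithm parameter such as
#   <algorithmParameter kisaoID="KISAO:0000209" value="1e-6"/>
# is mapped via this table to the roadrunner setting
#   model.integrator.setValue('relative_tolerance', 1e-06)
# in the generated python code (see algorithmParameterToParameterKey and
# simpleTaskToPython below).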
######################################################################################################################
# Interface functions
######################################################################################################################
# The functions listed in this section are the only functions one should interact with this module.
# We try to keep these back-wards compatible and keep the function signatures.
#
# All other function and class signatures can change.
######################################################################################################################
def sedmlToPython(inputStr, workingDir=None):
""" Convert sedml file to python code.
:param inputStr: full path name to SedML model or SED-ML string
:type inputStr: path
:return: generated python code
"""
factory = SEDMLCodeFactory(inputStr, workingDir=workingDir)
return factory.toPython()
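# Illustrative sketch (added, hypothetical file name): converting a SED-ML file
# to python code and running it manually would look roughly like
#   pycode = sedmlToPython('/path/to/experiment.sedml')
#   exec(compile(pycode, '<sedml>', 'exec'), {})
# executeSEDML below wraps this convert-and-execute step.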
def executeSEDML(inputStr, workingDir=None):
""" Run a SED-ML file or combine archive with results.
If a workingDir is provided the files and results are written in the workingDir.
:param inputStr:
:type inputStr:
:return:
:rtype:
"""
# execute the sedml
factory = SEDMLCodeFactory(inputStr, workingDir=workingDir)
factory.executePython()
def combineArchiveToPython(omexPath):
""" All python code generated from given combine archive.
:param omexPath:
:return: dictionary of { sedml_location: pycode }
"""
tmp_dir = tempfile.mkdtemp()
pycode = {}
try:
omex.extractCombineArchive(omexPath, directory=tmp_dir, method="zip")
locations = omex.getLocationsByFormat(omexPath, "sed-ml")
sedml_files = [os.path.join(tmp_dir, loc) for loc in locations]
for k, sedml_file in enumerate(sedml_files):
pystr = sedmlToPython(sedml_file)
pycode[locations[k]] = pystr
finally:
shutil.rmtree(tmp_dir)
return pycode
def executeCombineArchive(omexPath,
workingDir=None,
printPython=False,
createOutputs=True,
saveOutputs=False,
outputDir=None,
plottingEngine=None):
""" Run all SED-ML simulations in given COMBINE archive.
If no workingDir is provided execution is performed in temporary directory
which is cleaned afterwards.
The executed code can be printed via the 'printPython' flag.
:param omexPath: OMEX Combine archive
:param workingDir: directory to extract archive to
:param printPython: boolean switch to print executed python code
:param createOutputs: boolean flag if outputs should be created, i.e. reports and plots
:param saveOutputs: flag if the outputs should be saved to file
:param outputDir: directory where the outputs should be written
    :param plottingEngine: string of which plotting engine to use; uses the currently set plotting engine otherwise
:return dictionary of sedmlFile:data generators
"""
# combine archives are zip format
if zipfile.is_zipfile(omexPath):
try:
tmp_dir = tempfile.mkdtemp()
if workingDir is None:
extractDir = tmp_dir
else:
if not os.path.exists(workingDir):
raise IOError("workingDir does not exist, make sure to create the directoy: '{}'".format(workingDir))
extractDir = workingDir
# extract
omex.extractCombineArchive(omexPath=omexPath, directory=extractDir)
# get sedml locations by omex
sedml_locations = omex.getLocationsByFormat(omexPath=omexPath, formatKey="sed-ml", method="omex")
if len(sedml_locations) == 0:
# falling back to zip archive
sedml_locations = omex.getLocationsByFormat(omexPath=omexPath, formatKey="sed-ml", method="zip")
warnings.warn(
"No SED-ML files in COMBINE archive based on manifest '{}'; Guessed SED-ML {}".format(omexPath, sedml_locations))
# run all sedml files
results = {}
sedml_paths = [os.path.join(extractDir, loc) for loc in sedml_locations]
for sedmlFile in sedml_paths:
factory = SEDMLCodeFactory(sedmlFile,
workingDir=os.path.dirname(sedmlFile),
createOutputs=createOutputs,
saveOutputs=saveOutputs,
outputDir=outputDir,
plottingEngine=plottingEngine
)
if printPython:
code = factory.toPython()
print(code)
results[sedmlFile] = factory.executePython()
return results
finally:
shutil.rmtree(tmp_dir)
else:
if not os.path.exists(omexPath):
raise FileNotFoundError("File does not exist: {}".format(omexPath))
else:
raise IOError("File is not an OMEX Combine Archive in zip format: {}".format(omexPath))
######################################################################################################################
class SEDMLCodeFactory(object):
""" Code Factory generating executable code."""
# template location
TEMPLATE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
def __init__(self, inputStr,
workingDir=None,
createOutputs=True,
saveOutputs=False,
outputDir=None,
plottingEngine=None
):
""" Create CodeFactory for given input.
:param inputStr:
:param workingDir:
:param createOutputs: if outputs should be created
:return:
:rtype:
"""
self.inputStr = inputStr
self.workingDir = workingDir
self.python = sys.version
self.platform = platform.platform()
self.createOutputs = createOutputs
self.saveOutputs = saveOutputs
self.outputDir = outputDir
self.plotFormat = "pdf"
self.reportFormat = "csv"
if not plottingEngine:
plottingEngine = te.getPlottingEngine()
self.plottingEngine = plottingEngine
if self.outputDir:
if not os.path.exists(outputDir):
raise IOError("outputDir does not exist: {}".format(outputDir))
info = SEDMLTools.readSEDMLDocument(inputStr, workingDir)
self.doc = info['doc']
self.inputType = info['inputType']
self.workingDir = info['workingDir']
# parse the models (resolve the source models & the applied changes for all models)
model_sources, model_changes = SEDMLTools.resolveModelChanges(self.doc)
self.model_sources = model_sources
self.model_changes = model_changes
def __str__(self):
""" Print.
:return:
:rtype:
"""
lines = [
'{}'.format(self.__class__),
'doc: {}'.format(self.doc),
'workingDir: {}'.format(self.workingDir),
'inputType: {}'.format(self.inputType)
]
if self.inputType != SEDMLTools.INPUT_TYPE_STR:
lines.append('input: {}'.format(self.inputStr))
return '\n'.join(lines)
def sedmlString(self):
""" Get the SEDML XML string of the current document.
:return: SED-ML XML
:rtype: str
"""
return libsedml.writeSedMLToString(self.doc)
def toPython(self, python_template='tesedml_template.template'):
""" Create python code by rendering the python template.
Uses the information in the SED-ML document to create
python code
Renders the respective template.
:return: returns the rendered template
:rtype: str
"""
# template environment
env = jinja2.Environment(loader=jinja2.FileSystemLoader(self.TEMPLATE_DIR),
extensions=['jinja2.ext.autoescape'],
trim_blocks=True,
lstrip_blocks=True)
# additional filters
# for key in sedmlfilters.filters:
# env.filters[key] = getattr(sedmlfilters, key)
template = env.get_template(python_template)
env.globals['modelToPython'] = self.modelToPython
env.globals['dataDescriptionToPython'] = self.dataDescriptionToPython
env.globals['taskToPython'] = self.taskToPython
env.globals['dataGeneratorToPython'] = self.dataGeneratorToPython
env.globals['outputToPython'] = self.outputToPython
# timestamp
time = datetime.datetime.now()
timestamp = time.strftime('%Y-%m-%dT%H:%M:%S')
# Context
c = {
'version': te.getTelluriumVersion(),
'timestamp': timestamp,
'factory': self,
'doc': self.doc,
'model_sources': self.model_sources,
'model_changes': self.model_changes,
}
pysedml = template.render(c)
return pysedml
def executePython(self):
""" Executes python code.
The python code is created during the function call.
See :func:`createpython`
:return: returns dictionary of information with keys
"""
result = {}
code = self.toPython()
result['code'] = code
result['platform'] = platform.platform()
# FIXME: better solution for exec traceback
filename = os.path.join(tempfile.gettempdir(), 'te-generated-sedml.py')
try:
# Use of exec carries the usual security warnings
symbols = {}
exec(compile(code, filename, 'exec'), symbols)
# read information from exec symbols
dg_data = {}
for dg in self.doc.getListOfDataGenerators():
dg_id = dg.getId()
dg_data[dg_id] = symbols[dg_id]
result['dataGenerators'] = dg_data
return result
except:
# leak this tempfile just so we can see a full stack trace. freaking python.
with open(filename, 'w') as f:
f.write(code)
raise
def modelToPython(self, model):
""" Python code for SedModel.
:param model: SedModel instance
:type model: SedModel
:return: python str
:rtype: str
"""
lines = []
mid = model.getId()
language = model.getLanguage()
source = self.model_sources[mid]
if not language:
warnings.warn("No model language specified, defaulting to SBML for: {}".format(source))
def isUrn():
return source.startswith('urn') or source.startswith('URN')
def isHttp():
return source.startswith('http') or source.startswith('HTTP')
# read SBML
if 'sbml' in language or len(language) == 0:
if isUrn():
lines.append("import tellurium.temiriam as temiriam")
lines.append("__{}_sbml = temiriam.getSBMLFromBiomodelsURN('{}')".format(mid, source))
lines.append("{} = te.loadSBMLModel(__{}_sbml)".format(mid, mid))
elif isHttp():
lines.append("{} = te.loadSBMLModel('{}')".format(mid, source))
else:
lines.append("{} = te.loadSBMLModel(os.path.join(workingDir, '{}'))".format(mid, source))
# read CellML
elif 'cellml' in language:
warnings.warn("CellML model encountered. Tellurium CellML support is very limited.".format(language))
if isHttp():
lines.append("{} = te.loadCellMLModel('{}')".format(mid, source))
else:
lines.append("{} = te.loadCellMLModel(os.path.join(workingDir, '{}'))".format(mid, self.model_sources[mid]))
# other
else:
warnings.warn("Unsupported model language: '{}'.".format(language))
# apply model changes
for change in self.model_changes[mid]:
lines.extend(SEDMLCodeFactory.modelChangeToPython(model, change))
return '\n'.join(lines)
@staticmethod
def modelChangeToPython(model, change):
""" Creates the apply change python string for given model and change.
Currently only a very limited subset of model changes is supported.
Namely changes of parameters and concentrations within a SedChangeAttribute.
:param model: given model
:type model: SedModel
:param change: model change
:type change: SedChange
:return:
:rtype: str
"""
lines = []
mid = model.getId()
xpath = change.getTarget()
if change.getTypeCode() == libsedml.SEDML_CHANGE_ATTRIBUTE:
# resolve target change
value = change.getNewValue()
lines.append("# {} {}".format(xpath, value))
lines.append(SEDMLCodeFactory.targetToPython(xpath, value, modelId=mid))
elif change.getTypeCode() == libsedml.SEDML_CHANGE_COMPUTECHANGE:
variables = {}
for par in change.getListOfParameters():
variables[par.getId()] = par.getValue()
for var in change.getListOfVariables():
vid = var.getId()
selection = SEDMLCodeFactory.selectionFromVariable(var, mid)
expr = selection.id
if selection.type == "concentration":
expr = "init([{}])".format(selection.id)
elif selection.type == "amount":
expr = "init({})".format(selection.id)
lines.append("__var__{} = {}['{}']".format(vid, mid, expr))
variables[vid] = "__var__{}".format(vid)
# value is calculated with the current state of model
value = evaluableMathML(change.getMath(), variables=variables)
lines.append(SEDMLCodeFactory.targetToPython(xpath, value, modelId=mid))
elif change.getTypeCode() in [libsedml.SEDML_CHANGE_REMOVEXML,
libsedml.SEDML_CHANGE_ADDXML,
libsedml.SEDML_CHANGE_CHANGEXML]:
lines.append("# Unsupported change: {}".format(change.getElementName()))
warnings.warn("Unsupported change: {}".format(change.getElementName()))
else:
lines.append("# Unsupported change: {}".format(change.getElementName()))
warnings.warn("Unsupported change: {}".format(change.getElementName()))
return lines
def dataDescriptionToPython(self, dataDescription):
""" Python code for DataDescription.
:param dataDescription: SedModel instance
:type dataDescription: DataDescription
:return: python str
:rtype: str
"""
lines = []
from tellurium.sedml.data import DataDescriptionParser
data_sources = DataDescriptionParser.parse(dataDescription, self.workingDir)
def data_to_string(data):
info = np.array2string(data)
# cleaner string and NaN handling
info = info.replace('\n', ', ').replace('\r', '').replace('nan', 'np.nan')
return info
for sid, data in data_sources.items():
# handle the 1D shapes
if len(data.shape) == 1:
data = np.reshape(data.values, (data.shape[0], 1))
array_str = data_to_string(data)
lines.append("{} = np.array({})".format(sid, array_str))
return '\n'.join(lines)
################################################################################################
# Here the main work is done,
# transformation of tasks to python code
################################################################################################
@staticmethod
def taskToPython(doc, task):
""" Create python for arbitrary task (repeated or simple).
:param doc:
:type doc:
:param task:
:type task:
:return:
:rtype:
"""
# If no DataGenerator references the task, no execution is necessary
dgs = SEDMLCodeFactory.getDataGeneratorsForTask(doc, task)
if len(dgs) == 0:
return "# not part of any DataGenerator: {}".format(task.getId())
# tasks contain other subtasks, which can contain subtasks. This
# results in a tree of task dependencies where the
# simple tasks are the node leaves. These tree has to be resolved to
# generate code for more complex task dependencies.
# resolve task tree (order & dependency of tasks) & generate code
taskTree = SEDMLCodeFactory.createTaskTree(doc, rootTask=task)
return SEDMLCodeFactory.taskTreeToPython(doc, tree=taskTree)
class TaskNode(object):
""" Tree implementation of task tree. """
def __init__(self, task, depth):
self.task = task
self.depth = depth
self.children = []
self.parent = None
def add_child(self, obj):
obj.parent = self
self.children.append(obj)
def is_leaf(self):
return len(self.children) == 0
def __str__(self):
lines = ["<[{}] {} ({})>".format(self.depth, self.task.getId(), self.task.getElementName())]
for child in self.children:
child_str = child.__str__()
lines.extend(["\t{}".format(line) for line in child_str.split('\n')])
return "\n".join(lines)
def info(self):
return "<[{}] {} ({})>".format(self.depth, self.task.getId(), self.task.getElementName())
def __iter__(self):
""" Depth-first iterator which yields TaskNodes."""
yield self
for child in self.children:
for node in child:
yield node
class Stack(object):
""" Stack implementation for nodes."""
def __init__(self):
self.items = []
def isEmpty(self):
return self.items == []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def peek(self):
return self.items[len(self.items)-1]
def size(self):
return len(self.items)
def __str__(self):
return "stack: " + str([item.info() for item in self.items])
@staticmethod
def createTaskTree(doc, rootTask):
""" Creates the task tree.
Required for resolution of order of all simulations.
"""
def add_children(node):
typeCode = node.task.getTypeCode()
if typeCode == libsedml.SEDML_TASK:
return # no children
elif typeCode == libsedml.SEDML_TASK_REPEATEDTASK:
# add the ordered list of subtasks as children
subtasks = SEDMLCodeFactory.getOrderedSubtasks(node.task)
for st in subtasks:
# get real task for subtask
t = doc.getTask(st.getTask())
child = SEDMLCodeFactory.TaskNode(t, depth=node.depth+1)
node.add_child(child)
# recursive adding of children
add_children(child)
else:
raise IOError('Unsupported task type: {}'.format(node.task.getElementName()))
# create root
root = SEDMLCodeFactory.TaskNode(rootTask, depth=0)
# recursive adding of children
add_children(root)
return root
@staticmethod
def getOrderedSubtasks(task):
""" Ordered list of subtasks for task."""
subtasks = task.getListOfSubTasks()
subtaskOrder = [st.getOrder() for st in subtasks]
# sort by order, if all subtasks have order (not required)
        if all(o is not None for o in subtaskOrder):
subtasks = [st for (stOrder, st) in sorted(zip(subtaskOrder, subtasks))]
return subtasks
@staticmethod
def taskTreeToPython(doc, tree):
""" Python code generation from task tree. """
# go forward through task tree
lines = []
nodeStack = SEDMLCodeFactory.Stack()
treeNodes = [n for n in tree]
# iterate over the tree
for kn, node in enumerate(treeNodes):
taskType = node.task.getTypeCode()
# Create information for task
# We are going down in the tree
if taskType == libsedml.SEDML_TASK_REPEATEDTASK:
taskLines = SEDMLCodeFactory.repeatedTaskToPython(doc, node=node)
elif taskType == libsedml.SEDML_TASK:
tid = node.task.getId()
taskLines = SEDMLCodeFactory.simpleTaskToPython(doc=doc, node=node)
else:
lines.append("# Unsupported task: {}".format(taskType))
warnings.warn("Unsupported task: {}".format(taskType))
lines.extend([" "*node.depth + line for line in taskLines])
'''
@staticmethod
def simpleTaskToPython(doc, task):
""" Create python for simple task. """
for ksub, subtask in enumerate(subtasks):
t = doc.getTask(subtask.getTask())
resultVariable = "__subtask__".format(t.getId())
selections = SEDMLCodeFactory.selectionsForTask(doc=doc, task=task)
if t.getTypeCode() == libsedml.SEDML_TASK:
forLines.extend(SEDMLCodeFactory.subtaskToPython(doc, task=t,
selections=selections,
resultVariable=resultVariable))
forLines.append("{}.extend([__subtask__])".format(task.getId()))
elif t.getTypeCode() == libsedml.SEDML_TASK_REPEATEDTASK:
forLines.extend(SEDMLCodeFactory.repeatedTaskToPython(doc, task=t))
forLines.append("{}.extend({})".format(task.getId(), t.getId()))
'''
# Collect information
# We have to go back up
# Look at next node in the treeNodes (this is the next one to write)
if kn == (len(treeNodes)-1):
nextNode = None
else:
nextNode = treeNodes[kn+1]
# The next node is further up in the tree, or there is no next node
# and still nodes on the stack
if (nextNode is None) or (nextNode.depth < node.depth):
# necessary to pop nodes from the stack and close the code
test = True
while test is True:
# stack is empty
if nodeStack.size() == 0:
test = False
continue
# try to pop next one
peek = nodeStack.peek()
if (nextNode is None) or (peek.depth > nextNode.depth):
# TODO: reset evaluation has to be defined here
# determine if it's steady state
# if taskType == libsedml.SEDML_TASK_REPEATEDTASK:
# print('task {}'.format(node.task.getId()))
# print(' peek {}'.format(peek.task.getId()))
if node.task.getTypeCode() == libsedml.SEDML_TASK_REPEATEDTASK:
# if peek.task.getTypeCode() == libsedml.SEDML_TASK_REPEATEDTASK:
# sid = task.getSimulationReference()
# simulation = doc.getSimulation(sid)
# simType = simulation.getTypeCode()
# if simType is libsedml.SEDML_SIMULATION_STEADYSTATE:
terminator = 'terminate_trace({})'.format(node.task.getId())
else:
terminator = '{}'.format(node.task.getId())
lines.extend([
"",
# " "*node.depth + "{}.extend({})".format(peek.task.getId(), terminator),
" " * node.depth + "{}.extend({})".format(peek.task.getId(), node.task.getId()),
])
node = nodeStack.pop()
else:
test = False
else:
# we are going done or next subtask -> put node on stack
nodeStack.push(node)
return "\n".join(lines)
@staticmethod
def simpleTaskToPython(doc, node):
""" Creates the simulation python code for a given taskNode.
The taskNodes are required to handle the relationships between
RepeatedTasks, SubTasks and SimpleTasks (Task).
:param doc: sedml document
:type doc: SEDDocument
:param node: taskNode of the current task
:type node: TaskNode
:return:
:rtype:
"""
lines = []
task = node.task
lines.append("# Task: <{}>".format(task.getId()))
lines.append("{} = [None]".format(task.getId()))
mid = task.getModelReference()
sid = task.getSimulationReference()
simulation = doc.getSimulation(sid)
simType = simulation.getTypeCode()
algorithm = simulation.getAlgorithm()
if algorithm is None:
warnings.warn("Algorithm missing on simulation, defaulting to 'cvode: KISAO:0000019'")
algorithm = simulation.createAlgorithm()
algorithm.setKisaoID("KISAO:0000019")
kisao = algorithm.getKisaoID()
# is supported algorithm
if not SEDMLCodeFactory.isSupportedAlgorithmForSimulationType(kisao=kisao, simType=simType):
warnings.warn("Algorithm {} unsupported for simulation {} type {} in task {}".format(kisao, simulation.getId(), simType, task.getId()))
lines.append("# Unsupported Algorithm {} for SimulationType {}".format(kisao, simulation.getElementName()))
return lines
# set integrator/solver
integratorName = SEDMLCodeFactory.getIntegratorNameForKisaoID(kisao)
if not integratorName:
warnings.warn("No integrator exists for {} in roadrunner".format(kisao))
return lines
if simType is libsedml.SEDML_SIMULATION_STEADYSTATE:
lines.append("{}.setSteadyStateSolver('{}')".format(mid, integratorName))
else:
lines.append("{}.setIntegrator('{}')".format(mid, integratorName))
# use fixed step by default for stochastic sims
if integratorName == 'gillespie':
lines.append("{}.integrator.setValue('{}', {})".format(mid, 'variable_step_size', False))
if kisao == "KISAO:0000288": # BDF
lines.append("{}.integrator.setValue('{}', {})".format(mid, 'stiff', True))
elif kisao == "KISAO:0000280": # Adams-Moulton
lines.append("{}.integrator.setValue('{}', {})".format(mid, 'stiff', False))
# integrator/solver settings (AlgorithmParameters)
for par in algorithm.getListOfAlgorithmParameters():
pkey = SEDMLCodeFactory.algorithmParameterToParameterKey(par)
            # only set supported algorithm parameters
if pkey:
if pkey.dtype is str:
value = "'{}'".format(pkey.value)
else:
value = pkey.value
if value == str('inf') or pkey.value == float('inf'):
value = "float('inf')"
else:
pass
if simType is libsedml.SEDML_SIMULATION_STEADYSTATE:
lines.append("{}.steadyStateSolver.setValue('{}', {})".format(mid, pkey.key, value))
else:
lines.append("{}.integrator.setValue('{}', {})".format(mid, pkey.key, value))
if simType is libsedml.SEDML_SIMULATION_STEADYSTATE:
lines.append("if {model}.conservedMoietyAnalysis == False: {model}.conservedMoietyAnalysis = True".format(model=mid))
else:
lines.append("if {model}.conservedMoietyAnalysis == True: {model}.conservedMoietyAnalysis = False".format(model=mid))
# get parents
parents = []
parent = node.parent
while parent is not None:
parents.append(parent)
parent = parent.parent
# <selections> of all parents
# ---------------------------
selections = SEDMLCodeFactory.selectionsForTask(doc=doc, task=node.task)
for p in parents:
selections.update(SEDMLCodeFactory.selectionsForTask(doc=doc, task=p.task))
# <setValues> of all parents
# ---------------------------
# apply changes based on current variables, parameters and range variables
for parent in reversed(parents):
rangeId = parent.task.getRangeId()
helperRanges = {}
for r in parent.task.getListOfRanges():
if r.getId() != rangeId:
helperRanges[r.getId()] = r
for setValue in parent.task.getListOfTaskChanges():
variables = {}
# range variables
variables[rangeId] = "__value__{}".format(rangeId)
for key in helperRanges.keys():
variables[key] = "__value__{}".format(key)
# parameters
for par in setValue.getListOfParameters():
variables[par.getId()] = par.getValue()
for var in setValue.getListOfVariables():
vid = var.getId()
mid = var.getModelReference()
selection = SEDMLCodeFactory.selectionFromVariable(var, mid)
expr = selection.id
if selection.type == 'concentration':
expr = "init([{}])".format(selection.id)
elif selection.type == 'amount':
expr = "init({})".format(selection.id)
# create variable
lines.append("__value__{} = {}['{}']".format(vid, mid, expr))
# variable for replacement
variables[vid] = "__value__{}".format(vid)
# value is calculated with the current state of model
lines.append(SEDMLCodeFactory.targetToPython(xpath=setValue.getTarget(),
value=evaluableMathML(setValue.getMath(), variables=variables),
modelId=setValue.getModelReference())
)
# handle result variable
resultVariable = "{}[0]".format(task.getId())
# -------------------------------------------------------------------------
# <UNIFORM TIMECOURSE>
# -------------------------------------------------------------------------
if simType == libsedml.SEDML_SIMULATION_UNIFORMTIMECOURSE:
lines.append("{}.timeCourseSelections = {}".format(mid, list(selections)))
initialTime = simulation.getInitialTime()
outputStartTime = simulation.getOutputStartTime()
outputEndTime = simulation.getOutputEndTime()
numberOfPoints = simulation.getNumberOfPoints()
# reset before simulation (see https://github.com/sys-bio/tellurium/issues/193)
lines.append("{}.reset()".format(mid))
# throw some points away
if abs(outputStartTime - initialTime) > 1E-6:
lines.append("{}.simulate(start={}, end={}, points=2)".format(
mid, initialTime, outputStartTime))
# real simulation
lines.append("{} = {}.simulate(start={}, end={}, steps={})".format(
resultVariable, mid, outputStartTime, outputEndTime, numberOfPoints))
# -------------------------------------------------------------------------
# <ONESTEP>
# -------------------------------------------------------------------------
elif simType == libsedml.SEDML_SIMULATION_ONESTEP:
lines.append("{}.timeCourseSelections = {}".format(mid, list(selections)))
step = simulation.getStep()
lines.append("{} = {}.simulate(start={}, end={}, points=2)".format(resultVariable, mid, 0.0, step))
# -------------------------------------------------------------------------
# <STEADY STATE>
# -------------------------------------------------------------------------
elif simType == libsedml.SEDML_SIMULATION_STEADYSTATE:
lines.append("{}.steadyStateSolver.setValue('{}', {})".format(mid, 'allow_presimulation', False))
lines.append("{}.steadyStateSelections = {}".format(mid, list(selections)))
lines.append("{}.simulate()".format(mid)) # for stability of the steady state solver
lines.append("{} = {}.steadyStateNamedArray()".format(resultVariable, mid))
# no need to turn this off because it will be checked before the next simulation
# lines.append("{}.conservedMoietyAnalysis = False".format(mid))
# -------------------------------------------------------------------------
# <OTHER>
# -------------------------------------------------------------------------
else:
lines.append("# Unsupported simulation: {}".format(simType))
return lines
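    # Illustrative note (added, hypothetical ids): for a uniform time course
    # the method above emits roadrunner calls along the lines of
    #   model1.setIntegrator('cvode')
    #   model1.timeCourseSelections = ['time', '[S1]']
    #   model1.reset()
    #   task1[0] = model1.simulate(start=0.0, end=10.0, steps=100)
    # with the integrator, selections and time settings taken from the SED-ML
    # document.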
@staticmethod
def repeatedTaskToPython(doc, node):
""" Create python for RepeatedTask.
Must create
- the ranges (Ranges)
- apply all changes (SetValues)
"""
# storage of results
task = node.task
lines = ["", "{} = []".format(task.getId())]
# <Range Definition>
# master range
rangeId = task.getRangeId()
masterRange = task.getRange(rangeId)
if masterRange.getTypeCode() == libsedml.SEDML_RANGE_UNIFORMRANGE:
lines.extend(SEDMLCodeFactory.uniformRangeToPython(masterRange))
elif masterRange.getTypeCode() == libsedml.SEDML_RANGE_VECTORRANGE:
lines.extend(SEDMLCodeFactory.vectorRangeToPython(masterRange))
elif masterRange.getTypeCode() == libsedml.SEDML_RANGE_FUNCTIONALRANGE:
warnings.warn("FunctionalRange for master range not supported in task.")
# lock-in ranges
for r in task.getListOfRanges():
if r.getId() != rangeId:
if r.getTypeCode() == libsedml.SEDML_RANGE_UNIFORMRANGE:
lines.extend(SEDMLCodeFactory.uniformRangeToPython(r))
elif r.getTypeCode() == libsedml.SEDML_RANGE_VECTORRANGE:
lines.extend(SEDMLCodeFactory.vectorRangeToPython(r))
# <Range Iteration>
# iterate master range
lines.append("for __k__{}, __value__{} in enumerate(__range__{}):".format(rangeId, rangeId, rangeId))
# Everything from now on is done in every iteration of the range
# We have to collect & intent all lines in the loop)
forLines = []
# definition of lock-in ranges
helperRanges = {}
for r in task.getListOfRanges():
if r.getId() != rangeId:
helperRanges[r.getId()] = r
if r.getTypeCode() in [libsedml.SEDML_RANGE_UNIFORMRANGE,
libsedml.SEDML_RANGE_VECTORRANGE]:
forLines.append("__value__{} = __range__{}[__k__{}]".format(r.getId(), r.getId(), rangeId))
# <functional range>
if r.getTypeCode() == libsedml.SEDML_RANGE_FUNCTIONALRANGE:
variables = {}
# range variables
variables[rangeId] = "__value__{}".format(rangeId)
for key in helperRanges.keys():
variables[key] = "__value__{}".format(key)
# parameters
for par in r.getListOfParameters():
variables[par.getId()] = par.getValue()
for var in r.getListOfVariables():
vid = var.getId()
mid = var.getModelReference()
selection = SEDMLCodeFactory.selectionFromVariable(var, mid)
expr = selection.id
if selection.type == 'concentration':
expr = "[{}]".format(selection.id)
lines.append("__value__{} = {}['{}']".format(vid, mid, expr))
variables[vid] = "__value__{}".format(vid)
# value is calculated with the current state of model
value = evaluableMathML(r.getMath(), variables=variables)
forLines.append("__value__{} = {}".format(r.getId(), value))
# <resetModels>
# models to reset via task tree below node
mids = set([])
for child in node:
t = child.task
if t.getTypeCode() == libsedml.SEDML_TASK:
mids.add(t.getModelReference())
# reset models referenced in tree below task
for mid in mids:
if task.getResetModel():
# reset before every iteration
forLines.append("{}.reset()".format(mid))
else:
# reset before first iteration
forLines.append("if __k__{} == 0:".format(rangeId))
forLines.append(" {}.reset()".format(mid))
# add lines
lines.extend(' ' + line for line in forLines)
return lines
################################################################################################
@staticmethod
def getDataGeneratorsForTask(doc, task):
""" Get the DataGenerators which reference the given task.
:param doc:
:type doc:
:param task:
:type task:
:return:
:rtype:
"""
dgs = []
for dg in doc.getListOfDataGenerators():
for var in dg.getListOfVariables():
if var.getTaskReference() == task.getId():
dgs.append(dg)
break # the DataGenerator is added, no need to look at rest of variables
return dgs
@staticmethod
def selectionsForTask(doc, task):
""" Populate variable lists from the data generators for the given task.
These are the timeCourseSelections and steadyStateSelections
in RoadRunner.
Search all data generators for variables which have to be part of the simulation.
"""
modelId = task.getModelReference()
selections = set()
for dg in doc.getListOfDataGenerators():
for var in dg.getListOfVariables():
if var.getTaskReference() == task.getId():
selection = SEDMLCodeFactory.selectionFromVariable(var, modelId)
expr = selection.id
if selection.type == "concentration":
expr = "[{}]".format(selection.id)
selections.add(expr)
return selections
@staticmethod
def uniformRangeToPython(r):
""" Create python lines for uniform range.
:param r:
:type r:
:return:
:rtype:
"""
lines = []
rId = r.getId()
rStart = r.getStart()
rEnd = r.getEnd()
rPoints = r.getNumberOfPoints()+1 # One point more than number of points
rType = r.getType()
if rType in ['Linear', 'linear']:
lines.append("__range__{} = np.linspace(start={}, stop={}, num={})".format(rId, rStart, rEnd, rPoints))
elif rType in ['Log', 'log']:
lines.append("__range__{} = np.logspace(start={}, stop={}, num={})".format(rId, rStart, rEnd, rPoints))
else:
warnings.warn("Unsupported range type in UniformRange: {}".format(rType))
return lines
@staticmethod
def vectorRangeToPython(r):
lines = []
__range = np.zeros(shape=[r.getNumValues()])
for k, v in enumerate(r.getValues()):
__range[k] = v
lines.append("__range__{} = {}".format(r.getId(), list(__range)))
return lines
@staticmethod
def isSupportedAlgorithmForSimulationType(kisao, simType):
""" Check Algorithm Kisao Id is supported for simulation.
:return: is supported
:rtype: bool
"""
supported = []
if simType == libsedml.SEDML_SIMULATION_UNIFORMTIMECOURSE:
supported = KISAOS_UNIFORMTIMECOURSE
elif simType == libsedml.SEDML_SIMULATION_ONESTEP:
supported = KISAOS_ONESTEP
elif simType == libsedml.SEDML_SIMULATION_STEADYSTATE:
supported = KISAOS_STEADYSTATE
return kisao in supported
@staticmethod
def getIntegratorNameForKisaoID(kid):
""" RoadRunner integrator name for algorithm KisaoID.
:param kid: KisaoID
:type kid: str
:return: RoadRunner integrator name.
:rtype: str
"""
if kid in KISAOS_NLEQ:
return 'nleq2'
if kid in KISAOS_CVODE:
return 'cvode'
if kid in KISAOS_GILLESPIE:
return 'gillespie'
if kid in KISAOS_RK4:
return 'rk4'
if kid in KISAOS_RK45:
return 'rk45'
if kid in KISAOS_LSODA:
warnings.warn('Tellurium does not support LSODA. Using CVODE instead.')
return 'cvode' # just use cvode
return None
@staticmethod
def algorithmParameterToParameterKey(par):
""" Resolve the mapping between parameter keys and roadrunner integrator keys."""
ParameterKey = namedtuple('ParameterKey', 'key value dtype')
kid = par.getKisaoID()
value = par.getValue()
if kid in KISAOS_ALGORITHMPARAMETERS:
# algorithm parameter is in the list of parameters
key, dtype = KISAOS_ALGORITHMPARAMETERS[kid]
if dtype is bool:
# transform manually ! (otherwise all strings give True)
if value == 'true':
value = True
elif value == 'false':
value = False
else:
# cast to data type of parameter
value = dtype(value)
return ParameterKey(key, value, dtype)
else:
# algorithm parameter not supported
warnings.warn("Unsupported AlgorithmParameter: {} = {})".format(kid, value))
return None
@staticmethod
def targetToPython(xpath, value, modelId):
""" Creates python line for given xpath target and value.
:param xpath:
:type xpath:
:param value:
:type value:
:return:
:rtype:
"""
target = SEDMLCodeFactory._resolveXPath(xpath, modelId)
if target:
# initial concentration
if target.type == "concentration":
expr = 'init([{}])'.format(target.id)
# initial amount
elif target.type == "amount":
expr = 'init({})'.format(target.id)
# other (parameter, flux, ...)
else:
expr = target.id
line = ("{}['{}'] = {}".format(modelId, expr, value))
else:
line = ("# Unsupported target xpath: {}".format(xpath))
return line
@staticmethod
def selectionFromVariable(var, modelId):
""" Resolves the selection for the given variable.
First checks if the variable is a symbol and returns the symbol.
If no symbol is set the xpath of the target is resolved
and used in the selection
:param var: variable to resolve
:type var: SedVariable
:return: a single selection
:rtype: Selection (namedtuple: id type)
"""
Selection = namedtuple('Selection', 'id type')
# parse symbol expression
if var.isSetSymbol():
cvs = var.getSymbol()
astr = cvs.rsplit("symbol:")
sid = astr[1]
return Selection(sid, 'symbol')
# use xpath
elif var.isSetTarget():
xpath = var.getTarget()
target = SEDMLCodeFactory._resolveXPath(xpath, modelId)
return Selection(target.id, target.type)
else:
warnings.warn("Unrecognized Selection in variable")
return None
@staticmethod
def _resolveXPath(xpath, modelId):
""" Resolve the target from the xpath expression.
A single target in the model corresponding to the modelId is resolved.
Currently, the model is not used for xpath resolution.
:param xpath: xpath expression.
:type xpath: str
:param modelId: id of model in which xpath should be resolved
:type modelId: str
:return: single target of xpath expression
:rtype: Target (namedtuple: id type)
"""
# TODO: via better xpath expression
# get type from the SBML document for the given id.
# The xpath expression can be very general and does not need to contain the full
# xml path
# For instance:
# /sbml:sbml/sbml:model/descendant::*[@id='S1']
# has to resolve to species.
# TODO: figure out concentration or amount (from SBML document)
# FIXME: getting of sids, pids not very robust, handle more cases (rules, reactions, ...)
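        # Illustrative examples (hypothetical xpaths) of how targets resolve below:
        #   "/sbml:sbml/sbml:model/listOfParameters/parameter[@id='k1']" -> Target(id='k1', type='parameter')
        #   "/sbml:sbml/sbml:model/listOfSpecies/species[@id='S1']"      -> Target(id='S1', type='concentration')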
Target = namedtuple('Target', 'id type')
def getId(xpath):
xpath = xpath.replace('"', "'")
match = re.findall(r"id='(.*?)'", xpath)
            if (match is None) or (len(match) == 0):
warnings.warn("Xpath could not be resolved: {}".format(xpath))
return match[0]
# parameter value change
if ("model" in xpath) and ("parameter" in xpath):
return Target(getId(xpath), 'parameter')
# species concentration change
elif ("model" in xpath) and ("species" in xpath):
return Target(getId(xpath), 'concentration')
# other
elif ("model" in xpath) and ("id" in xpath):
return Target(getId(xpath), 'other')
# cannot be parsed
else:
raise ValueError("Unsupported target in xpath: {}".format(xpath))
@staticmethod
def dataGeneratorToPython(doc, generator):
""" Create variable from the data generators and the simulation results and data sources.
The data of repeatedTasks is handled differently depending
on if reset=True or reset=False.
reset=True:
every repeat is a single curve, i.e. the data is a list of data
reset=False:
all curves belong to a single simulation and are concatenated to one dataset
"""
lines = []
gid = generator.getId()
mathml = generator.getMath()
# create variables
variables = {}
for par in generator.getListOfParameters():
variables[par.getId()] = par.getValue()
for var in generator.getListOfVariables():
varId = var.getId()
variables[varId] = "__var__{}".format(varId)
# create python for variables
for var in generator.getListOfVariables():
varId = var.getId()
taskId = var.getTaskReference()
task = doc.getTask(taskId)
# simulation data
if task is not None:
modelId = task.getModelReference()
selection = SEDMLCodeFactory.selectionFromVariable(var, modelId)
isTime = False
if selection.type == "symbol" and selection.id == "time":
isTime = True
resetModel = True
if task.getTypeCode() == libsedml.SEDML_TASK_REPEATEDTASK:
resetModel = task.getResetModel()
sid = selection.id
if selection.type == "concentration":
sid = "[{}]".format(selection.id)
# Series of curves
if resetModel is True:
                    # If each entry in the task consists of a single point (e.g. a steady
                    # state scan), concatenate the points. Otherwise, plot as separate curves.
lines.append("__var__{} = np.concatenate([sim['{}'] for sim in {}])".format(varId, sid, taskId))
else:
# One curve via time adjusted concatenate
if isTime is True:
lines.append("__offsets__{} = np.cumsum(np.array([sim['{}'][-1] for sim in {}]))".format(taskId, sid, taskId))
lines.append("__offsets__{} = np.insert(__offsets__{}, 0, 0)".format(taskId, taskId))
lines.append("__var__{} = np.transpose(np.array([sim['{}']+__offsets__{}[k] for k, sim in enumerate({})]))".format(varId, sid, taskId, taskId))
lines.append("__var__{} = np.concatenate(np.transpose(__var__{}))".format(varId, varId))
else:
lines.append("__var__{} = np.transpose(np.array([sim['{}'] for sim in {}]))".format(varId, sid, taskId))
lines.append("__var__{} = np.concatenate(np.transpose(__var__{}))".format(varId, varId))
lines.append("if len(__var__{}.shape) == 1:".format(varId))
lines.append(" __var__{}.shape += (1,)".format(varId))
# check for data sources
else:
target = var.getTarget()
if target.startswith('#'):
sid = target[1:]
lines.append("__var__{} = {}".format(varId, sid))
else:
warnings.warn("Unknown target in variable, no reference to SId: {}".format(target))
# calculate data generator
value = evaluableMathML(mathml, variables=variables, array=True)
lines.append("{} = {}".format(gid, value))
return "\n".join(lines)
def outputToPython(self, doc, output):
""" Create output """
lines = []
typeCode = output.getTypeCode()
if typeCode == libsedml.SEDML_OUTPUT_REPORT:
lines.extend(SEDMLCodeFactory.outputReportToPython(self, doc, output))
elif typeCode == libsedml.SEDML_OUTPUT_PLOT2D:
lines.extend(SEDMLCodeFactory.outputPlot2DToPython(self, doc, output))
elif typeCode == libsedml.SEDML_OUTPUT_PLOT3D:
lines.extend(SEDMLCodeFactory.outputPlot3DToPython(self, doc, output))
else:
warnings.warn("# Unsupported output type '{}' in output {}".format(output.getElementName(), output.getId()))
return '\n'.join(lines)
def outputReportToPython(self, doc, output):
""" OutputReport
:param doc:
:type doc: SedDocument
:param output:
:type output: SedOutputReport
:return: list of python lines
:rtype: list(str)
"""
lines = []
headers = []
dgIds = []
columns = []
for dataSet in output.getListOfDataSets():
# these are the columns
headers.append(dataSet.getLabel())
# data generator (the id is the id of the data in python)
dgId = dataSet.getDataReference()
dgIds.append(dgId)
columns.append("{}[:,k]".format(dgId))
# create data frames for the repeats
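        # Illustrative sketch of the emitted code (hypothetical report id 'report1',
        # data generators 'dg1'/'dg2' with labels 'time'/'S1'):
        #   __dfs__report1 = []
        #   for k in range(dg1.shape[1]):
        #       __df__k = pandas.DataFrame(np.column_stack([dg1[:,k], dg2[:,k]]), columns=['time', 'S1'])
        #       __dfs__report1.append(__df__k)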
lines.append("__dfs__{} = []".format(output.getId()))
lines.append("for k in range({}.shape[1]):".format(dgIds[0]))
lines.append(" __df__k = pandas.DataFrame(np.column_stack(" + str(columns).replace("'", "") + "), \n columns=" + str(headers) + ")")
lines.append(" __dfs__{}.append(__df__k)".format(output.getId()))
# save as variable in Tellurium
lines.append(" te.setLastReport(__df__k)".format(output.getId()))
if self.saveOutputs and self.createOutputs:
lines.append(
" filename = os.path.join('{}', '{}.{}')".format(self.outputDir, output.getId(), self.reportFormat))
lines.append(
" __df__k.to_csv(filename, sep=',', index=False)".format(output.getId()))
lines.append(
" print('Report {}: {{}}'.format(filename))".format(output.getId()))
return lines
@staticmethod
def outputPlotSettings():
""" Settings for all plot types.
:return:
:rtype:
"""
PlotSettings = namedtuple('PlotSettings', 'colors, figsize, dpi, facecolor, edgecolor, linewidth, marker, markersize, alpha')
        # all lines of the same curve have the same color
settings = PlotSettings(
colors=[u'C0', u'C1', u'C2', u'C3', u'C4', u'C5', u'C6', u'C7', u'C8', u'C9'],
figsize=(9, 5),
dpi=80,
facecolor='w',
edgecolor='k',
linewidth=1.5,
marker='',
markersize=3.0,
alpha=1.0
)
return settings
def outputPlot2DToPython(self, doc, output):
""" OutputReport
If workingDir is provided the plot is saved in the workingDir.
:param doc:
:type doc: SedDocument
:param output:
:type output: SedOutputReport
:return: list of python lines
:rtype: list(str)
"""
# TODO: logX and logY not applied
lines = []
settings = SEDMLCodeFactory.outputPlotSettings()
# figure title
title = output.getId()
if output.isSetName():
title = "{}".format(output.getName())
# xtitle
oneXLabel = True
allXLabel = None
for kc, curve in enumerate(output.getListOfCurves()):
xId = curve.getXDataReference()
dgx = doc.getDataGenerator(xId)
xLabel = xId
if dgx.isSetName():
xLabel = "{}".format(dgx.getName())
# do all curves have the same xLabel
if kc == 0:
allXLabel = xLabel
elif xLabel != allXLabel:
oneXLabel = False
xtitle = ''
if oneXLabel:
xtitle = allXLabel
lines.append("_stacked = False")
# stacking, currently disabled
# lines.append("_stacked = False")
# lines.append("_engine = te.getPlottingEngine()")
# for kc, curve in enumerate(output.getListOfCurves()):
# xId = curve.getXDataReference()
# lines.append("if {}.shape[1] > 1 and te.getDefaultPlottingEngine() == 'plotly':".format(xId))
# lines.append(" stacked=True")
lines.append("if _stacked:")
lines.append(" tefig = te.getPlottingEngine().newStackedFigure(title='{}', xtitle='{}')".format(title, xtitle))
lines.append("else:")
lines.append(" tefig = te.nextFigure(title='{}', xtitle='{}')\n".format(title, xtitle))
for kc, curve in enumerate(output.getListOfCurves()):
logX = curve.getLogX()
logY = curve.getLogY()
xId = curve.getXDataReference()
yId = curve.getYDataReference()
dgx = doc.getDataGenerator(xId)
dgy = doc.getDataGenerator(yId)
color = settings.colors[kc % len(settings.colors)]
tag = 'tag{}'.format(kc)
yLabel = yId
if curve.isSetName():
yLabel = "{}".format(curve.getName())
elif dgy.isSetName():
yLabel = "{}".format(dgy.getName())
# FIXME: add all the additional information to the plot, i.e. the settings and styles for a given curve
lines.append("for k in range({}.shape[1]):".format(xId))
lines.append(" extra_args = {}")
lines.append(" if k == 0:")
lines.append(" extra_args['name'] = '{}'".format(yLabel))
lines.append(" tefig.addXYDataset({xarr}[:,k], {yarr}[:,k], color='{color}', tag='{tag}', logx={logx}, logy={logy}, **extra_args)".format(xarr=xId, yarr=yId, color=color, tag=tag, logx=logX, logy=logY))
# FIXME: endpoints must be handled via plotting functions
# lines.append(" fix_endpoints({}[:,k], {}[:,k], color='{}', tag='{}', fig=tefig)".format(xId, yId, color, tag))
lines.append("if te.tiledFigure():\n")
lines.append(" if te.tiledFigure().renderIfExhausted():\n")
lines.append(" te.clearTiledFigure()\n")
lines.append("else:\n")
lines.append(" fig = tefig.render()\n")
if self.saveOutputs and self.createOutputs:
# FIXME: only working for matplotlib
lines.append("if str(te.getPlottingEngine()) == '<MatplotlibEngine>':".format(self.outputDir, output.getId(), self.plotFormat))
lines.append(" filename = os.path.join('{}', '{}.{}')".format(self.outputDir, output.getId(), self.plotFormat))
lines.append(" fig.savefig(filename, format='{}', bbox_inches='tight')".format(self.plotFormat))
lines.append(" print('Figure {}: {{}}'.format(filename))".format(output.getId()))
lines.append("")
return lines
def outputPlot3DToPython(self, doc, output):
""" OutputPlot3D
:param doc:
:type doc: SedDocument
:param output:
:type output: SedOutputPlot3D
:return: list of python lines
:rtype: list(str)
"""
# TODO: handle mix of log and linear axis
settings = SEDMLCodeFactory.outputPlotSettings()
lines = []
lines.append("from mpl_toolkits.mplot3d import Axes3D")
lines.append("fig = plt.figure(num=None, figsize={}, dpi={}, facecolor='{}', edgecolor='{}')".format(settings.figsize, settings.dpi, settings.facecolor, settings.edgecolor))
lines.append("from matplotlib import gridspec")
lines.append("__gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])")
lines.append("ax = plt.subplot(__gs[0], projection='3d')")
# lines.append("ax = fig.gca(projection='3d')")
title = output.getId()
if output.isSetName():
title = output.getName()
oneXLabel = True
oneYLabel = True
allXLabel = None
allYLabel = None
for kc, surf in enumerate(output.getListOfSurfaces()):
logX = surf.getLogX()
logY = surf.getLogY()
logZ = surf.getLogZ()
xId = surf.getXDataReference()
yId = surf.getYDataReference()
zId = surf.getZDataReference()
dgx = doc.getDataGenerator(xId)
dgy = doc.getDataGenerator(yId)
dgz = doc.getDataGenerator(zId)
color = settings.colors[kc % len(settings.colors)]
zLabel = zId
if surf.isSetName():
zLabel = surf.getName()
elif dgy.isSetName():
zLabel = dgz.getName()
xLabel = xId
if dgx.isSetName():
xLabel = dgx.getName()
yLabel = yId
if dgy.isSetName():
yLabel = dgy.getName()
# do all curves have the same xLabel & yLabel
if kc == 0:
allXLabel = xLabel
allYLabel = yLabel
if xLabel != allXLabel:
oneXLabel = False
if yLabel != allYLabel:
oneYLabel = False
lines.append("for k in range({}.shape[1]):".format(xId))
lines.append(" if k == 0:")
lines.append(" ax.plot({}[:,k], {}[:,k], {}[:,k], marker = '{}', color='{}', linewidth={}, markersize={}, alpha={}, label='{}')".format(xId, yId, zId, settings.marker, color, settings.linewidth, settings.markersize, settings.alpha, zLabel))
lines.append(" else:")
lines.append(" ax.plot({}[:,k], {}[:,k], {}[:,k], marker = '{}', color='{}', linewidth={}, markersize={}, alpha={})".format(xId, yId, zId, settings.marker, color, settings.linewidth, settings.markersize, settings.alpha))
lines.append("ax.set_title('{}', fontweight='bold')".format(title))
if oneXLabel:
lines.append("ax.set_xlabel('{}', fontweight='bold')".format(xLabel))
if oneYLabel:
lines.append("ax.set_ylabel('{}', fontweight='bold')".format(yLabel))
if len(output.getListOfSurfaces()) == 1:
lines.append("ax.set_zlabel('{}', fontweight='bold')".format(zLabel))
lines.append("__lg = plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)")
lines.append("__lg.draw_frame(False)")
lines.append("plt.setp(__lg.get_texts(), fontsize='small')")
lines.append("plt.setp(__lg.get_texts(), fontweight='bold')")
lines.append("plt.tick_params(axis='both', which='major', labelsize=10)")
lines.append("plt.tick_params(axis='both', which='minor', labelsize=8)")
lines.append("plt.savefig(os.path.join(workingDir, '{}.png'), dpi=100)".format(output.getId()))
lines.append("plt.show()".format(title))
return lines
##################################################################################################
class SEDMLTools(object):
""" Helper functions to work with sedml. """
INPUT_TYPE_STR = 'SEDML_STRING'
INPUT_TYPE_FILE_SEDML = 'SEDML_FILE'
INPUT_TYPE_FILE_COMBINE = 'COMBINE_FILE' # includes .sedx archives
@classmethod
def checkSEDMLDocument(cls, doc):
""" Checks the SedDocument for errors.
Raises IOError if error exists.
:param doc:
:type doc:
"""
errorlog = doc.getErrorLog()
msg = errorlog.toString()
if doc.getErrorLog().getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_ERROR) > 0:
# FIXME: workaround for https://github.com/fbergmann/libSEDML/issues/47
warnings.warn(msg)
# raise IOError(msg)
if errorlog.getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_FATAL) > 0:
# raise IOError(msg)
warnings.warn(msg)
if errorlog.getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_WARNING) > 0:
warnings.warn(msg)
if errorlog.getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_SCHEMA_ERROR) > 0:
warnings.warn(msg)
if errorlog.getNumFailsWithSeverity(libsedml.LIBSEDML_SEV_GENERAL_WARNING) > 0:
warnings.warn(msg)
@classmethod
def readSEDMLDocument(cls, inputStr, workingDir):
""" Parses SedMLDocument from given input.
:return: dictionary of SedDocument, inputType and working directory.
:rtype: {doc, inputType, workingDir}
"""
# SEDML-String
if not os.path.exists(inputStr):
try:
from xml.etree import ElementTree
x = ElementTree.fromstring(inputStr)
# is parsable xml string
doc = libsedml.readSedMLFromString(inputStr)
inputType = cls.INPUT_TYPE_STR
if workingDir is None:
workingDir = os.getcwd()
except ElementTree.ParseError:
if not os.path.exists(inputStr):
raise IOError("SED-ML String is not valid XML:", inputStr)
# SEDML-File
else:
filename, extension = os.path.splitext(os.path.basename(inputStr))
# Archive
if zipfile.is_zipfile(inputStr):
omexPath = inputStr
inputType = cls.INPUT_TYPE_FILE_COMBINE
# in case of sedx and combine a working directory is created
# in which the files are extracted
if workingDir is None:
extractDir = os.path.join(os.path.dirname(os.path.realpath(omexPath)), '_te_{}'.format(filename))
else:
extractDir = workingDir
# TODO: refactor this
# extract the archive to working directory
CombineArchive.extractArchive(omexPath, extractDir)
# get SEDML files from archive
sedmlFiles = CombineArchive.filePathsFromExtractedArchive(extractDir, filetype='sed-ml')
if len(sedmlFiles) == 0:
raise IOError("No SEDML files found in archive.")
# FIXME: there could be multiple SEDML files in archive (currently only first used)
# analogue to executeOMEX
if len(sedmlFiles) > 1:
warnings.warn("More than one sedml file in archive, only processing first one.")
sedmlFile = sedmlFiles[0]
doc = libsedml.readSedMLFromFile(sedmlFile)
# we have to work relative to the SED-ML file
workingDir = os.path.dirname(sedmlFile)
cls.checkSEDMLDocument(doc)
# SEDML single file
elif os.path.isfile(inputStr):
if extension not in [".sedml", '.xml']:
raise IOError("SEDML file should have [.sedml|.xml] extension:", inputStr)
inputType = cls.INPUT_TYPE_FILE_SEDML
doc = libsedml.readSedMLFromFile(inputStr)
cls.checkSEDMLDocument(doc)
# working directory is where the sedml file is
if workingDir is None:
workingDir = os.path.dirname(os.path.realpath(inputStr))
return {'doc': doc,
'inputType': inputType,
'workingDir': workingDir}
@staticmethod
def resolveModelChanges(doc):
""" Resolves the original source model and full change lists for models.
Going through the tree of model upwards until root is reached and
collecting changes on the way (example models m* and changes c*)
m1 (source) -> m2 (c1, c2) -> m3 (c3, c4)
resolves to
m1 (source) []
m2 (source) [c1,c2]
m3 (source) [c1,c2,c3,c4]
The order of changes is important (at least between nodes on different
levels of hierarchies), because later changes of derived models could
reverse earlier changes.
Uses recursive search strategy, which should be okay as long as the model tree hierarchy is
        not getting too big.
"""
# initial dicts (handle source & change information for single node)
model_sources = {}
model_changes = {}
for m in doc.getListOfModels():
mid = m.getId()
source = m.getSource()
model_sources[mid] = source
changes = []
# store the changes unique for this model
for c in m.getListOfChanges():
changes.append(c)
model_changes[mid] = changes
# recursive search for original model and store the
# changes which have to be applied in the list of changes
def findSource(mid, changes):
# mid is node above
if mid in model_sources and not model_sources[mid] == mid:
# add changes for node
for c in model_changes[mid]:
changes.append(c)
# keep looking deeper
return findSource(model_sources[mid], changes)
# the source is no longer a key in the sources, it is the source
return mid, changes
all_changes = {}
mids = [m.getId() for m in doc.getListOfModels()]
for mid in mids:
source, changes = findSource(mid, changes=list())
model_sources[mid] = source
all_changes[mid] = changes[::-1]
return model_sources, all_changes
'''
The following functions all manipulate the DataGenenerators which
breaks many things !!!
These should be used as preprocessing before plotting, but NOT CHANGE
values or length of DataGenerator variables.
MK: cannot fix this until I did not understand how the plots are generated
for plotly.
'''
def process_trace(trace):
""" If each entry in the task consists of a single point
(e.g. steady state scan), concatenate the points.
Otherwise, plot as separate curves."""
warnings.warn("don't use this", DeprecationWarning)
# print('trace.size = {}'.format(trace.size))
# print('len(trace.shape) = {}'.format(len(trace.shape)))
if trace.size > 1:
# FIXME: this adds a nan at the end of the data. This is a bug.
if len(trace.shape) == 1:
return np.concatenate((np.atleast_1d(trace), np.atleast_1d(np.nan)))
#return np.atleast_1d(trace)
elif len(trace.shape) == 2:
#print('2d trace')
# print(trace.shape)
# FIXME: this adds a nan at the end of the data. This is a bug.
result = np.vstack((np.atleast_1d(trace), np.full((1,trace.shape[-1]),np.nan)))
#result = np.vstack((np.atleast_1d(trace), np.full((1, trace.shape[-1]))))
return result
else:
return np.atleast_1d(trace)
def terminate_trace(trace):
""" If each entry in the task consists of a single point
(e.g. steady state scan), concatenate the points.
Otherwise, plot as separate curves."""
warnings.warn("don't use this", DeprecationWarning)
if isinstance(trace, list):
if len(trace) > 0 and not isinstance(trace[-1], list) and not isinstance(trace[-1], dict):
# if len(trace) > 2 and isinstance(trace[-1], dict):
# e = np.array(trace[-1], copy=True)
e = {}
for name in trace[-1].colnames:
e[name] = np.atleast_1d(np.nan)
# print('e:')
# print(e)
return trace + [e]
return trace
def fix_endpoints(x, y, color, tag, fig):
""" Adds endpoint markers wherever there is a discontinuity in the data."""
warnings.warn("don't use this", DeprecationWarning)
# expect x and y to be 1d
if len(x.shape) > 1:
raise RuntimeError('Expected x to be 1d')
if len(y.shape) > 1:
raise RuntimeError('Expected y to be 1d')
x_aug = np.concatenate((np.atleast_1d(np.nan), np.atleast_1d(x), np.atleast_1d(np.nan)))
y_aug = np.concatenate((np.atleast_1d(np.nan), np.atleast_1d(y), np.atleast_1d(np.nan)))
w = np.argwhere(np.isnan(x_aug))
endpoints_x = []
endpoints_y = []
for begin, end in ( (int(w[k]+1), int(w[k+1])) for k in range(w.shape[0]-1) ):
if begin != end:
#print('begin {}, end {}'.format(begin, end))
x_values = x_aug[begin:end]
x_identical = np.all(x_values == x_values[0])
y_values = y_aug[begin:end]
y_identical = np.all(y_values == y_values[0])
#print('x_values')
#print(x_values)
#print('x identical? {}'.format(x_identical))
#print('y_values')
#print(y_values)
#print('y identical? {}'.format(y_identical))
if x_identical and y_identical:
# get the coords for the new markers
x_begin = x_values[0]
x_end = x_values[-1]
y_begin = y_values[0]
y_end = y_values[-1]
# append to the lists
endpoints_x += [x_begin, x_end]
endpoints_y += [y_begin, y_end]
if endpoints_x:
fig.addXYDataset(np.array(endpoints_x), np.array(endpoints_y), color=color, tag=tag, mode='markers')
##################################################################################################
if __name__ == "__main__":
import os
from tellurium.tests.testdata import SEDML_TEST_DIR, OMEX_TEST_DIR
import matplotlib
def testInput(sedmlInput):
""" Test function run on inputStr. """
print('\n', '*'*100)
print(sedmlInput)
print('*'*100)
factory = SEDMLCodeFactory(sedmlInput)
# create python file
python_str = factory.toPython()
realPath = os.path.realpath(sedmlInput)
with open(sedmlInput + '.py', 'w') as f:
f.write(python_str)
# execute python
factory.executePython()
# testInput(os.path.join(sedmlDir, "sedMLBIOM21.sedml"))
# Check sed-ml files
for fname in sorted(os.listdir(SEDML_TEST_DIR)):
if fname.endswith(".sedml"):
testInput(os.path.join(SEDML_TEST_DIR, fname))
# Check sedx archives
for fname in sorted(os.listdir(OMEX_TEST_DIR)):
if fname.endswith(".sedx"):
testInput(os.path.join(OMEX_TEST_DIR, fname))
| apache-2.0 |
anurag313/scikit-learn | examples/text/document_classification_20newsgroups.py | 222 | 10500 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
JBmiog/ET4394-Wireless-Networking | gnu-radio/fsk_decode_to_char.py | 1 | 2583 | import matplotlib.pyplot as plt
import scipy
import binascii
import re
import os
dir = os.path.dirname(__file__)
#method copied from stackoverflow
# http://stackoverflow.com/questions/7396849/convert-binary-to-ascii-and-vice-versa
def text_to_bits(text, encoding='utf-8', errors='surrogatepass'):
bits = bin(int(binascii.hexlify(text.encode(encoding, errors)), 16))[2:]
return bits.zfill(8 * ((len(bits) + 7) // 8))
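# Illustrative check (computed by hand): text_to_bits('TMCS') returns
# '01010100010011010100001101010011' (4 bytes -> 32 bits), which matches the
# preamble bit pattern searched for further below.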
#method copied from stackoverflow
# http://stackoverflow.com/questions/7396849/convert-binary-to-ascii-and-vice-versa
def text_from_bits(bits, encoding='utf-8', errors='replace'):
n = int(bits, 2)
return int2bytes(n).decode(encoding, errors)
#method copied from stackoverflow
# http://stackoverflow.com/questions/7396849/convert-binary-to-ascii-and-vice-versa
def int2bytes(i):
hex_string = '%x' % i
n = len(hex_string)
return binascii.unhexlify(hex_string.zfill(n + (n & 1)))
def get_frame_start_and_end(data,preamble,end_of_packet_sequence):
return -1
def get_binary_data_from_input(data):
    bin_data = '0b'
    for x in range(1, len(data)):
        if data[x] < 0:
bin_data = bin_data + '0'
else:
bin_data = bin_data + '1'
return bin_data
def get_relative_end_of_packet(data, end_of_packet):
end = data.find(end_of_packet)
if(end == -1):
return -1
else:
return end
#filename = '../GNR_isolated_signals/Buenos_isolated_after_clock_recovery'
dir = os.path.dirname(__file__)
filename = os.path.join(dir, 'GNR_isolated_signals/Buenos_isolated_after_clock_recovery')
print(filename)
print ("reading file: %s")%filename
f = scipy.fromfile(open(filename),dtype=scipy.float32)
bin_data = get_binary_data_from_input(f)
print("binary data: %s")%bin_data
#preamble = text_to_bits('TMCS') + '00001000'
preamble = text_to_bits('TMCS')
print("preamble sequence = %s")%preamble
#packet ends with a carriege return (0x0D) and a line feed(0x0A)
end_of_packet = "00001101" + "00001010"
print("end of packet sequence = %s")%(end_of_packet)
#found at stack overflow, efficient lookup:
#http://stackoverflow.com/questions/4664850/find-all-occurrences-of-a-substring-in-python
x_start_of_packet = [m.start() for m in re.finditer(preamble, bin_data)]
print("found preamble at string elements:")
print(x_start_of_packet)
for i in range(len(x_start_of_packet)):
    rel_end = get_relative_end_of_packet(bin_data[x_start_of_packet[i]:], end_of_packet)
    data = text_from_bits("0b11111111" + bin_data[x_start_of_packet[i]:x_start_of_packet[i]+rel_end])
print data
| gpl-3.0 |
jaytlennon/Emergence | tools/DiversityTools/distributions/distributions.py | 10 | 2502 | from __future__ import division
import sys
import os
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.formula.api as smf
import pandas as pd
########### PATHS ##############################################################
tools = os.path.expanduser("~/tools")
sys.path.append(tools + "/macroeco_distributions")
import macroeco_distributions as md
######### CLASS ################################################################
class zipf:
""" A class to obtain a zipf object with inherited mle shape parameter,
mle form of the rank-abundance distribution, and a rank-abundance curve
based on fitting the zipf to the observed via a generalized linear model."""
def __init__(self, obs):
self.obs = obs
def from_cdf(self):
""" Obtain the maximum likelihood form of the Zipf distribution, given
the mle value for the Zipf shape parameter (a). Using a, this code
generates a rank-abundance distribution (RAD) from the cumulative
density function (cdf) using the percent point function (ppf) also known
as the quantile function.
see: http://www.esapubs.org/archive/ecol/E093/155/appendix-B.htm
This is an actual form of the Zipf distribution, obtained from getting
the mle for the shape parameter.
"""
p = md.zipf_solver(self.obs)
S = len(self.obs)
rv = stats.zipf(a=p)
rad = []
for i in range(1, S+1):
val = (S - i + 0.5)/S
x = rv.ppf(val)
rad.append(int(x))
return rad
def from_glm(self):
""" Fit the Zipf distribution to the observed vector of integer values
using a generalized linear model.
Note: This is a fitted curve; not an actual form of the Zipf distribution
This method was inspired by the vegan
package's open source code on vegan's public GitHub repository:
https://github.com/vegandevs/vegan/blob/master/R/rad.zipf.R
        on Thursday, 19 March 2015 """
ranks = np.log(range(1, len(self.obs)+1))
off = [np.log(sum(self.obs))] * len(self.obs)
d = pd.DataFrame({'ranks': ranks, 'off': off, 'x':self.obs})
lm = smf.glm(formula='x ~ ranks', data = d, family = sm.families.Poisson()).fit()
pred = lm.predict()
return pred
# zipf_pred = zipf(ad)
# zipf_mle = zipf_pred.from_cdf()
# zipf_glm = zipf_pred.from_glm()
| mit |
Sumith1896/sympy | sympy/physics/quantum/circuitplot.py | 58 | 12941 | """Matplotlib based plotting of quantum circuits.
Todo:
* Optimize printing of large circuits.
* Get this to work with single gates.
* Do a better job checking the form of circuits to make sure it is a Mul of
Gates.
* Get multi-target gates plotting.
* Get initial and final states to plot.
* Get measurements to plot. Might need to rethink measurement as a gate
issue.
* Get scale and figsize to be handled in a better way.
* Write some tests/examples!
"""
from __future__ import print_function, division
from sympy import Mul
from sympy.core.compatibility import u, range
from sympy.external import import_module
from sympy.physics.quantum.gate import Gate, OneQubitGate, CGate, CGateS
from sympy.core.core import BasicMeta
from sympy.core.assumptions import ManagedProperties
__all__ = [
'CircuitPlot',
'circuit_plot',
'labeller',
'Mz',
'Mx',
'CreateOneQubitGate',
'CreateCGate',
]
np = import_module('numpy')
matplotlib = import_module(
'matplotlib', __import__kwargs={'fromlist': ['pyplot']},
catch=(RuntimeError,)) # This is raised in environments that have no display.
if not np or not matplotlib:
class CircuitPlot(object):
def __init__(*args, **kwargs):
raise ImportError('numpy or matplotlib not available.')
def circuit_plot(*args, **kwargs):
raise ImportError('numpy or matplotlib not available.')
else:
pyplot = matplotlib.pyplot
Line2D = matplotlib.lines.Line2D
Circle = matplotlib.patches.Circle
#from matplotlib import rc
#rc('text',usetex=True)
class CircuitPlot(object):
"""A class for managing a circuit plot."""
scale = 1.0
fontsize = 20.0
linewidth = 1.0
control_radius = 0.05
not_radius = 0.15
swap_delta = 0.05
labels = []
inits = {}
label_buffer = 0.5
def __init__(self, c, nqubits, **kwargs):
self.circuit = c
self.ngates = len(self.circuit.args)
self.nqubits = nqubits
self.update(kwargs)
self._create_grid()
self._create_figure()
self._plot_wires()
self._plot_gates()
self._finish()
def update(self, kwargs):
"""Load the kwargs into the instance dict."""
self.__dict__.update(kwargs)
def _create_grid(self):
"""Create the grid of wires."""
scale = self.scale
wire_grid = np.arange(0.0, self.nqubits*scale, scale, dtype=float)
gate_grid = np.arange(0.0, self.ngates*scale, scale, dtype=float)
self._wire_grid = wire_grid
self._gate_grid = gate_grid
def _create_figure(self):
"""Create the main matplotlib figure."""
self._figure = pyplot.figure(
figsize=(self.ngates*self.scale, self.nqubits*self.scale),
facecolor='w',
edgecolor='w'
)
ax = self._figure.add_subplot(
1, 1, 1,
frameon=True
)
ax.set_axis_off()
offset = 0.5*self.scale
ax.set_xlim(self._gate_grid[0] - offset, self._gate_grid[-1] + offset)
ax.set_ylim(self._wire_grid[0] - offset, self._wire_grid[-1] + offset)
ax.set_aspect('equal')
self._axes = ax
def _plot_wires(self):
"""Plot the wires of the circuit diagram."""
xstart = self._gate_grid[0]
xstop = self._gate_grid[-1]
xdata = (xstart - self.scale, xstop + self.scale)
for i in range(self.nqubits):
ydata = (self._wire_grid[i], self._wire_grid[i])
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
if self.labels:
init_label_buffer = 0
if self.inits.get(self.labels[i]): init_label_buffer = 0.25
self._axes.text(
xdata[0]-self.label_buffer-init_label_buffer,ydata[0],
render_label(self.labels[i],self.inits),
size=self.fontsize,
color='k',ha='center',va='center')
self._plot_measured_wires()
def _plot_measured_wires(self):
ismeasured = self._measurements()
xstop = self._gate_grid[-1]
dy = 0.04 # amount to shift wires when doubled
# Plot doubled wires after they are measured
for im in ismeasured:
xdata = (self._gate_grid[ismeasured[im]],xstop+self.scale)
ydata = (self._wire_grid[im]+dy,self._wire_grid[im]+dy)
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
# Also double any controlled lines off these wires
for i,g in enumerate(self._gates()):
if isinstance(g, CGate) or isinstance(g, CGateS):
wires = g.controls + g.targets
for wire in wires:
if wire in ismeasured and \
self._gate_grid[i] > self._gate_grid[ismeasured[wire]]:
ydata = min(wires), max(wires)
xdata = self._gate_grid[i]-dy, self._gate_grid[i]-dy
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
def _gates(self):
"""Create a list of all gates in the circuit plot."""
gates = []
if isinstance(self.circuit, Mul):
for g in reversed(self.circuit.args):
if isinstance(g, Gate):
gates.append(g)
elif isinstance(self.circuit, Gate):
gates.append(self.circuit)
return gates
def _plot_gates(self):
"""Iterate through the gates and plot each of them."""
for i, gate in enumerate(self._gates()):
gate.plot_gate(self, i)
def _measurements(self):
"""Return a dict {i:j} where i is the index of the wire that has
been measured, and j is the gate where the wire is measured.
"""
ismeasured = {}
for i,g in enumerate(self._gates()):
if getattr(g,'measurement',False):
for target in g.targets:
if target in ismeasured:
if ismeasured[target] > i:
ismeasured[target] = i
else:
ismeasured[target] = i
return ismeasured
def _finish(self):
# Disable clipping to make panning work well for large circuits.
for o in self._figure.findobj():
o.set_clip_on(False)
def one_qubit_box(self, t, gate_idx, wire_idx):
"""Draw a box for a single qubit gate."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
self._axes.text(
x, y, t,
color='k',
ha='center',
va='center',
bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth),
size=self.fontsize
)
def two_qubit_box(self, t, gate_idx, wire_idx):
"""Draw a box for a two qubit gate. Doesn't work yet.
"""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]+0.5
print(self._gate_grid)
print(self._wire_grid)
obj = self._axes.text(
x, y, t,
color='k',
ha='center',
va='center',
bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth),
size=self.fontsize
)
def control_line(self, gate_idx, min_wire, max_wire):
"""Draw a vertical control line."""
xdata = (self._gate_grid[gate_idx], self._gate_grid[gate_idx])
ydata = (self._wire_grid[min_wire], self._wire_grid[max_wire])
line = Line2D(
xdata, ydata,
color='k',
lw=self.linewidth
)
self._axes.add_line(line)
def control_point(self, gate_idx, wire_idx):
"""Draw a control point."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
radius = self.control_radius
c = Circle(
(x, y),
radius*self.scale,
ec='k',
fc='k',
fill=True,
lw=self.linewidth
)
self._axes.add_patch(c)
def not_point(self, gate_idx, wire_idx):
"""Draw a NOT gates as the circle with plus in the middle."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
radius = self.not_radius
c = Circle(
(x, y),
radius,
ec='k',
fc='w',
fill=False,
lw=self.linewidth
)
self._axes.add_patch(c)
l = Line2D(
(x, x), (y - radius, y + radius),
color='k',
lw=self.linewidth
)
self._axes.add_line(l)
def swap_point(self, gate_idx, wire_idx):
"""Draw a swap point as a cross."""
x = self._gate_grid[gate_idx]
y = self._wire_grid[wire_idx]
d = self.swap_delta
l1 = Line2D(
(x - d, x + d),
(y - d, y + d),
color='k',
lw=self.linewidth
)
l2 = Line2D(
(x - d, x + d),
(y + d, y - d),
color='k',
lw=self.linewidth
)
self._axes.add_line(l1)
self._axes.add_line(l2)
def circuit_plot(c, nqubits, **kwargs):
"""Draw the circuit diagram for the circuit with nqubits.
Parameters
==========
c : circuit
The circuit to plot. Should be a product of Gate instances.
nqubits : int
The number of qubits to include in the circuit. Must be at least
            as big as the largest ``min_qubits`` of the gates.
"""
return CircuitPlot(c, nqubits, **kwargs)
def render_label(label, inits={}):
"""Slightly more flexible way to render labels.
>>> from sympy.physics.quantum.circuitplot import render_label
>>> render_label('q0')
'$|q0\\\\rangle$'
>>> render_label('q0', {'q0':'0'})
'$|q0\\\\rangle=|0\\\\rangle$'
"""
init = inits.get(label)
if init:
return r'$|%s\rangle=|%s\rangle$' % (label, init)
return r'$|%s\rangle$' % label
def labeller(n, symbol='q'):
"""Autogenerate labels for wires of quantum circuits.
Parameters
==========
n : int
number of qubits in the circuit
symbol : string
A character string to precede all gate labels. E.g. 'q_0', 'q_1', etc.
>>> from sympy.physics.quantum.circuitplot import labeller
>>> labeller(2)
['q_1', 'q_0']
>>> labeller(3,'j')
['j_2', 'j_1', 'j_0']
"""
return ['%s_%d' % (symbol,n-i-1) for i in range(n)]
class Mz(OneQubitGate):
"""Mock-up of a z measurement gate.
This is in circuitplot rather than gate.py because it's not a real
gate, it just draws one.
"""
measurement = True
gate_name='Mz'
gate_name_latex=u('M_z')
class Mx(OneQubitGate):
"""Mock-up of an x measurement gate.
This is in circuitplot rather than gate.py because it's not a real
gate, it just draws one.
"""
measurement = True
gate_name='Mx'
gate_name_latex=u('M_x')
class CreateOneQubitGate(ManagedProperties):
def __new__(mcl, name, latexname=None):
if not latexname:
latexname = name
return BasicMeta.__new__(mcl, name + "Gate", (OneQubitGate,),
{'gate_name': name, 'gate_name_latex': latexname})
def CreateCGate(name, latexname=None):
"""Use a lexical closure to make a controlled gate.
"""
if not latexname:
latexname = name
onequbitgate = CreateOneQubitGate(name, latexname)
def ControlledGate(ctrls,target):
return CGate(tuple(ctrls),onequbitgate(target))
return ControlledGate
| bsd-3-clause |
jreback/pandas | pandas/tests/util/test_deprecate_kwarg.py | 8 | 2043 | import pytest
from pandas.util._decorators import deprecate_kwarg
import pandas._testing as tm
@deprecate_kwarg("old", "new")
def _f1(new=False):
return new
_f2_mappings = {"yes": True, "no": False}
@deprecate_kwarg("old", "new", _f2_mappings)
def _f2(new=False):
return new
def _f3_mapping(x):
return x + 1
@deprecate_kwarg("old", "new", _f3_mapping)
def _f3(new=0):
return new
@pytest.mark.parametrize("key,klass", [("old", FutureWarning), ("new", None)])
def test_deprecate_kwarg(key, klass):
x = 78
with tm.assert_produces_warning(klass):
assert _f1(**{key: x}) == x
@pytest.mark.parametrize("key", list(_f2_mappings.keys()))
def test_dict_deprecate_kwarg(key):
with tm.assert_produces_warning(FutureWarning):
assert _f2(old=key) == _f2_mappings[key]
@pytest.mark.parametrize("key", ["bogus", 12345, -1.23])
def test_missing_deprecate_kwarg(key):
with tm.assert_produces_warning(FutureWarning):
assert _f2(old=key) == key
@pytest.mark.parametrize("x", [1, -1.4, 0])
def test_callable_deprecate_kwarg(x):
with tm.assert_produces_warning(FutureWarning):
assert _f3(old=x) == _f3_mapping(x)
def test_callable_deprecate_kwarg_fail():
msg = "((can only|cannot) concatenate)|(must be str)|(Can't convert)"
with pytest.raises(TypeError, match=msg):
_f3(old="hello")
def test_bad_deprecate_kwarg():
msg = "mapping from old to new argument values must be dict or callable!"
with pytest.raises(TypeError, match=msg):
@deprecate_kwarg("old", "new", 0)
def f4(new=None):
return new
@deprecate_kwarg("old", None)
def _f4(old=True, unchanged=True):
return old, unchanged
@pytest.mark.parametrize("key", ["old", "unchanged"])
def test_deprecate_keyword(key):
x = 9
if key == "old":
klass = FutureWarning
expected = (x, True)
else:
klass = None
expected = (True, x)
with tm.assert_produces_warning(klass):
assert _f4(**{key: x}) == expected
| bsd-3-clause |
tleonhardt/CodingPlayground | dataquest/DataVisualiation/basic_plotitng.py | 1 | 4597 | #!/usr/bin/env python
"""
This analyzes data on forest fires from a National Park in Portugal. The park is divided up into a
9 by 9 grid. Each fire has a corresponding X position on the grid and a Y position on the grid.
Each row describes a fire that happened in Montesinho National Park. Here's a listing of the columns:
X -- The X position on the grid where the fire occurred.
Y -- The Y position on the grid where the fire occurred.
month -- the month the fire occurred.
day -- the day of the week the fire occurred.
temp -- the temperature in Celsius when the fire occurred.
wind -- the wind speed when the fire occurred in units of km/h
rain -- the rainfall when the fire occurred.
area -- the area the fire consumed in ha
The intent is to gain some familiarity with Matplotlib
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
# Use Pandas to read the CSV file into a DataFrame
forest_fires = pd.read_csv('../data/forest_fires.csv')
# Reset matplotlib defaults
mpl.rcParams.update(mpl.rcParamsDefault)
# Switch default plot style if desired
# plt.style.use("fivethirtyeight")
# plt.style.use("ggplot")
# plt.style.use("dark_background")
# plt.style.use("bmh")
# Enable interactive mode so plt.show() won't block
plt.ion()
## Scatter Plots
# Make a scatter plot with the wind column on the x-axis and the area column on the y-axis
plt.figure()
plt.scatter(forest_fires['wind'], forest_fires['area'])
plt.xlabel('wind speed (km/h)')
plt.ylabel('burned area (ha)')
plt.show()
# Make a scatter plot with the temp column on the x-axis and the area column on the y-axis
plt.figure()
plt.scatter(forest_fires['temp'], forest_fires['area'])
plt.xlabel('temperature (C)')
plt.ylabel('burned area (ha)')
plt.show()
## Line Charts
age = [5, 10, 15, 20, 25, 30]
height = [25, 45, 65, 75, 75, 75]
# Use the plot() method to plot age on the x-axis and height on the y-axis
plt.figure()
plt.plot(age, height)
plt.xlabel('age (years)')
plt.ylabel('height (inches)')
plt.show()
## Bar Graphs
# Use pivot_table() method to calculate the average area of the fires started at each X or Y position
area_by_y = forest_fires.pivot_table(index="Y", values="area", aggfunc=np.mean)
area_by_x = forest_fires.pivot_table(index="X", values="area", aggfunc=np.mean)
# Use the bar() method to plot area_by_y.index on the x-axis and area_by_y on the y-axis
plt.figure()
plt.bar(area_by_y.index, area_by_y)
plt.xlabel('Y grid location')
plt.ylabel('Average area of fires started at location Y')
plt.show()
# Use the bar() method to plot area_by_x.index on the x-axis and area_by_x on the y-axis
plt.figure()
plt.bar(area_by_x.index, area_by_x)
plt.xlabel('X grid location')
plt.ylabel('Average area of fires started at location X')
plt.show()
## Horizontal Bar Graphs
# barh() is a horizontal bar chart and the first variable passed in is plotted on the y-axis
area_by_month = forest_fires.pivot_table(index="month", values="area", aggfunc=np.mean)
area_by_day = forest_fires.pivot_table(index="day", values="area", aggfunc=np.mean)
# We need to take an extra step to deal with an index consisting of strings
# Use the barh() method to plot range(len(area_by_month)) on the y-axis and area_by_month on the x-axis
plt.figure()
plt.barh(range(len(area_by_month)), area_by_month)
plt.ylabel('month')
plt.xlabel('burned area (ha)')
plt.show()
# Use the barh() method to plot range(len(area_by_day)) on the y-axis and area_by_day on the x-axis
plt.figure()
plt.barh(range(len(area_by_day)), area_by_day)
plt.ylabel('day of week')
plt.xlabel('burned area (ha)')
plt.show()
## Chart Labels
# Make a scatter plot with the wind column of forest_fires on the x-axis and the area column of forest_fires on the y-axis
plt.figure()
plt.scatter(forest_fires['wind'], forest_fires['area'])
plt.title('Wind speed vs fire area')
plt.xlabel('Wind speed when fire started')
plt.ylabel('Area consumed by fire')
plt.show()
## Plot Aesthetics
# Switch to the "fivethirtyeight" style.
plt.style.use("fivethirtyeight")
# plt.style.use("ggplot")
# plt.style.use("dark_background")
# plt.style.use("bmh")
# Make a scatter plot the rain column of forest_fires on the x-axis and the area column of forest_fires on the y-axis
plt.figure()
plt.scatter(forest_fires['rain'], forest_fires['area'])
plt.title('Rain vs Area for forest fires')
plt.xlabel('rainfall when the fire occurred (mm/m2)')
plt.ylabel('Area consumed by fire (ha)')
plt.show()
# Reset matplotlib defaults so we don't effect any other scripts
mpl.rcParams.update(mpl.rcParamsDefault)
| mit |
ryfeus/lambda-packs | Shapely_numpy/source/numpy/core/fromnumeric.py | 22 | 98126 | """Module containing non-deprecated functions borrowed from Numeric.
"""
from __future__ import division, absolute_import, print_function
import types
import warnings
import numpy as np
from .. import VisibleDeprecationWarning
from . import multiarray as mu
from . import umath as um
from . import numerictypes as nt
from .numeric import asarray, array, asanyarray, concatenate
from . import _methods
_dt_ = nt.sctype2char
# functions that are methods
__all__ = [
'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
try:
_gentype = types.GeneratorType
except AttributeError:
_gentype = type(None)
# save away Python sum
_sum_ = sum
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
try:
return getattr(obj, method)(*args, **kwds)
# An AttributeError occurs if the object does not have
# such a method in its class.
# A TypeError occurs if the object does have such a method
# in its class, but its signature is not identical to that
# of NumPy's. This situation has occurred in the case of
# a downstream library like 'pandas'.
except (AttributeError, TypeError):
return _wrapit(obj, method, *args, **kwds)
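# For example, _wrapfunc([3, 1, 2], 'argsort') falls back to _wrapit because a plain
# list has no 'argsort' attribute, while an ndarray dispatches straight to its method.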
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
This function does the same thing as "fancy" indexing (indexing arrays
using arrays); however, it can be easier to use if you need elements
along a given axis.
Parameters
----------
a : array_like
The source array.
indices : array_like
The indices of the values to extract.
.. versionadded:: 1.8.0
Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
subarray : ndarray
The returned array has the same type as `a`.
See Also
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, [[0, 1], [2, 3]])
array([[4, 3],
[5, 7]])
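For out-of-bounds indices the `mode` keyword controls the behaviour; an illustrative
sketch of the rules described above:
>>> np.take(a, [2, 7], mode='wrap')  # 7 wraps around to index 1
array([5, 3])
>>> np.take(a, [2, 7], mode='clip')  # 7 is clipped to the last index
array([5, 8])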
"""
return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
# not deprecated --- copy if necessary, view otherwise
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
order : {'C', 'F', 'A'}, optional
Read the elements of `a` using this index order, and place the
elements into the reshaped array using this index order. 'C'
means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to read / write the
elements using Fortran-like index order, with the first index
changing fastest, and the last index changing slowest. Note that
the 'C' and 'F' options take no account of the memory layout of
the underlying array, and only refer to the order of indexing.
'A' means to read / write the elements in Fortran-like index
order if `a` is Fortran *contiguous* in memory, C-like order
otherwise.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy. Note there is no guarantee of the *memory layout* (C- or
Fortran- contiguous) of the returned array.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
copying the data. If you want an error to be raised if the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
# A transpose make the array non-contiguous
>>> b = a.T
# Taking a view makes it possible to modify the shape without modifying
# the initial object.
>>> c = b.view()
>>> c.shape = (20)
AttributeError: incompatible shape for a non-contiguous array
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array.
For example, let's say you have an array:
>>> a = np.arange(6).reshape((3, 2))
>>> a
array([[0, 1],
[2, 3],
[4, 5]])
You can think of reshaping as first raveling the array (using the given
index order), then inserting the elements from the raveled array into the
new array using the same kind of index ordering as was used for the
raveling.
>>> np.reshape(a, (2, 3)) # C-like index ordering
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
array([[0, 4, 3],
[2, 1, 5]])
>>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
array([[0, 4, 3],
[2, 1, 5]])
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
return _wrapfunc(a, 'reshape', newshape, order=order)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a set of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of `n` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each `i`. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode=raise`` (the default), then, first of all, each element of
`a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that
`i` (in that range) is the value at the `(j0, j1, ..., jm)` position
in `Ba` - then the value at the same position in the new array is the
value in `Bchoices[i]` at that same position;
* if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed)
integer; negative integers are mapped to 0; values greater than `n-1`
are mapped to `n-1`; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in `[0, n-1]`, where `n` is the number
of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any
integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside `[0, n-1]` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod `n`
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : int or array of ints
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _wrapfunc(a, 'repeat', repeats, axis=axis)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
See Also
--------
putmask, place
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
try:
put = a.put
except AttributeError:
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(a).__name__))
return put(ind, v, mode=mode)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
returned; otherwise a new array is created. For earlier NumPy
versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
return _wrapfunc(a, 'swapaxes', axis1, axis2)
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : array_like
Input array.
axes : list of ints, optional
By default, reverse the dimensions, otherwise permute the axes
according to the values given.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
moveaxis
argsort
Notes
-----
Use `transpose(a, argsort(axes))` to invert the transposition of tensors
when using the `axes` keyword argument.
Transposing a 1-D array returns an unchanged view of the original array.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
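The `argsort` trick from the Notes inverts the permutation (illustrative sketch):
>>> axes = (1, 0, 2)
>>> np.transpose(np.transpose(x, axes), np.argsort(axes)).shape
(1, 2, 3)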
"""
return _wrapfunc(a, 'transpose', axes)
def partition(a, kth, axis=-1, kind='introselect', order=None):
"""
Return a partitioned copy of an array.
Creates a copy of the array with its elements rearranged in such a
way that the value of the element in k-th position is in the
position it would be in a sorted array. All elements smaller than
the k-th element are moved before this element and all equal or
greater are moved behind it. The ordering of the elements in the two
partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to be sorted.
kth : int or sequence of ints
Element index to partition by. The k-th value of the element
will be in its final sorted position and all smaller elements
will be moved before it and all equal or greater elements behind
it. The order of all elements in the partitions is undefined. If
provided with a sequence of k-th it will partition all elements
indexed by k-th of them into their sorted position at once.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string. Not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
partitioned_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.partition : Method to sort an array in-place.
argpartition : Indirect partition.
sort : Full sorting
Notes
-----
The various selection algorithms are characterized by their average
speed, worst case performance, work space size, and whether they are
stable. A stable sort keeps items with the same key in the same
relative order. The available algorithms have the following
properties:
================= ======= ============= ============ =======
kind speed worst case work space stable
================= ======= ============= ============ =======
'introselect' 1 O(n) 0 no
================= ======= ============= ============ =======
All the partition algorithms make temporary copies of the data when
partitioning along any but the last axis. Consequently,
partitioning along the last axis is faster and uses less space than
partitioning along any other axis.
The sort order for complex numbers is lexicographic. If both the
real and imaginary parts are non-nan then the order is determined by
the real parts except when they are equal, in which case the order
is determined by the imaginary parts.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> np.partition(a, 3)
array([2, 1, 3, 4])
>>> np.partition(a, (1, 3))
array([1, 2, 3, 4])
"""
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy(order="K")
a.partition(kth, axis=axis, kind=kind, order=order)
return a
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
"""
Perform an indirect partition along the given axis using the
algorithm specified by the `kind` keyword. It returns an array of
indices of the same shape as `a` that index data along the given
axis in partitioned order.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to sort.
kth : int or sequence of ints
Element index to partition by. The k-th element will be in its
final sorted position and all smaller elements will be moved
before it and all larger elements behind it. The order of all
elements in the partitions is undefined. If provided with a
sequence of k-th it will partition all of them into their sorted
position at once.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If
None, the flattened array is used.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string, and not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
In other words, ``a[index_array]`` yields a partitioned `a`.
See Also
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
argsort : Full indirect sort
Notes
-----
See `partition` for notes on the different selection algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 4, 2, 1])
>>> x[np.argpartition(x, 3)]
array([2, 1, 3, 4])
>>> x[np.argpartition(x, (1, 3))]
array([1, 2, 3, 4])
>>> x = [3, 4, 2, 1]
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
"""
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
def sort(a, axis=-1, kind='quicksort', order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The three available algorithms have the following
properties:
=========== ======= ============= ============ =======
kind speed worst case work space stable
=========== ======= ============= ============ =======
'quicksort' 1 O(n^2) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'heapsort' 3 O(n*log(n)) 0 no
=========== ======= ============= ============ =======
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to an introsort which will switch to
heapsort when it does not make enough progress. This makes its
worst case O(n*log(n)).
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy(order="K")
a.sort(axis=axis, kind=kind, order=order)
return a
def argsort(a, axis=-1, kind='quicksort', order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> np.argsort(x, axis=0)
array([[0, 1],
[1, 0]])
>>> np.argsort(x, axis=1)
array([[0, 1],
[0, 1]])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
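To convert the flat index back into coordinates, combine with `unravel_index`
(illustrative sketch, using `a` from above):
>>> np.unravel_index(np.argmax(a), a.shape)
(1, 2)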
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
>>> b = np.arange(6)
>>> b[4] = 0
>>> b
array([0, 1, 2, 3, 0, 5])
>>> np.argmin(b) # Only the first occurrence is returned.
0
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
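The `sorter` argument allows searching an unsorted array through its argsort
(illustrative sketch):
>>> x = np.array([3, 1, 2])
>>> np.searchsorted(x, 2, sorter=np.argsort(x))
1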
"""
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated in the order that they are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(2,3))
array([[0, 1, 2],
[3, 0, 1]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
Na = len(a)
if not Na:
return mu.zeros(new_shape, a.dtype)
total_size = um.multiply.reduce(new_shape)
n_copies = int(total_size / Na)
extra = total_size % Na
if total_size == 0:
return a[:0]
if extra != 0:
n_copies = n_copies+1
extra = Na-extra
a = concatenate((a,)*n_copies)
if extra > 0:
a = a[:-extra]
return reshape(a, new_shape)
def squeeze(a, axis=None):
"""
Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
.. versionadded:: 1.7.0
Selects a subset of the single-dimensional entries in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`.
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=(2,)).shape
(1, 3)
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze')
try:
# First try to use the new axis= parameter
return squeeze(axis=axis)
except TypeError:
# For backwards compatibility
return squeeze()
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form ``a[i, i+offset]``. If
`a` has more than two dimensions, then the axes specified by `axis1`
and `axis2` are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by
removing `axis1` and `axis2` and appending an index to the right equal
to the size of the resulting diagonals.
In versions of NumPy prior to 1.7, this function always returned a new,
independent array containing a copy of the values in the diagonal.
In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
but depending on this fact is deprecated. Writing to the resulting
array continues to work as it used to, but a FutureWarning is issued.
Starting in NumPy 1.9 it returns a read-only view on the original array.
Attempting to write to the resulting array will produce an error.
In some future release, it will return a read/write view and writing to
the returned array will alter your original array. The returned array
will have the same type as the input array.
If you don't write to the array returned by this function, then you can
just ignore all of the above.
If you depend on the current behavior, then we suggest copying the
returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
of just ``np.diagonal(a)``. This will work with both past and future
versions of NumPy.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be positive or
negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D sub-arrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D sub-arrays from
which the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D and not a matrix, a 1-D array of the same type as `a`
containing the diagonal is returned. If `a` is a matrix, a 1-D
array containing the diagonal is returned in order to maintain
backward compatibility. If the dimension of `a` is greater than
two, then an array of diagonals is returned, "packed" from
left-most dimension to right-most (e.g., if `a` is 3-D, then the
diagonals are "packed" along rows).
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
diag : MATLAB work-a-like for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
A 3-D example:
>>> a = np.arange(8).reshape(2,2,2); a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0, # Main diagonals of two arrays created by skipping
... 0, # across the outer(left)-most axis last and
... 1) # the "middle" (row) axis first.
array([[0, 6],
[1, 7]])
The sub-arrays whose main diagonals we just obtained; note that each
corresponds to fixing the right-most (column) axis, and that the
diagonals are "packed" in rows.
>>> a[:,:,0] # main diagonal is [0 6]
array([[0, 2],
[4, 6]])
>>> a[:,:,1] # main diagonal is [1 7]
array([[1, 3],
[5, 7]])
"""
if isinstance(a, np.matrix):
# Make diagonal of matrix 1-D to preserve backward compatibility.
return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
else:
return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
if isinstance(a, np.matrix):
# Get trace of matrix via an array to preserve backward compatibility.
return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
else:
return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
def ravel(a, order='C'):
"""Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
As of NumPy 1.10, the returned array will have the same type as the input
array. (for example, a masked array will be returned for a masked array
input)
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means
to index the elements in row-major, C-style order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to index the elements
in column-major, Fortran-style order, with the
first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of
the memory layout of the underlying array, and only refer to
the order of axis indexing. 'A' means to read the elements in
Fortran-like index order if `a` is Fortran *contiguous* in
memory, C-like order otherwise. 'K' means to read the
elements in the order they occur in memory, except for
reversing the data when strides are negative. By default, 'C'
index order is used.
Returns
-------
y : array_like
If `a` is a matrix, y is a 1-D ndarray, otherwise y is an array of
the same subtype as `a`. The shape of the returned array is
``(a.size,)``. Matrices are special cased for backward
compatibility.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
ndarray.reshape : Change the shape of an array without changing its data.
Notes
-----
In row-major, C-style order, in two dimensions, the row index
varies the slowest, and the column index the quickest. This can
be generalized to multiple dimensions, where row-major order
implies that the index along the first axis varies slowest, and
the index along the last quickest. The opposite holds for
column-major, Fortran-style index ordering.
When a view is desired in as many cases as possible, ``arr.reshape(-1)``
may be preferable.
Examples
--------
It is equivalent to ``reshape(-1, order=order)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print(np.ravel(x))
[1 2 3 4 5 6]
>>> print(x.reshape(-1))
[1 2 3 4 5 6]
>>> print(np.ravel(x, order='F'))
[1 4 2 5 3 6]
When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
>>> print(np.ravel(x.T))
[1 4 2 5 3 6]
>>> print(np.ravel(x.T, order='A'))
[1 2 3 4 5 6]
When ``order`` is 'K', it will preserve orderings that are neither 'C'
nor 'F', but won't reverse axes:
>>> a = np.arange(3)[::-1]; a
array([2, 1, 0])
>>> a.ravel(order='C')
array([2, 1, 0])
>>> a.ravel(order='K')
array([2, 1, 0])
>>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
array([[[ 0, 2, 4],
[ 1, 3, 5]],
[[ 6, 8, 10],
[ 7, 9, 11]]])
>>> a.ravel(order='C')
array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
>>> a.ravel(order='K')
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
if isinstance(a, np.matrix):
return asarray(a).ravel(order=order)
else:
return asanyarray(a).ravel(order=order)
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
row-major, C-style order. The corresponding non-zero
values can be obtained with::
a[nonzero(a)]
To group the indices by element, rather than dimension, use::
transpose(nonzero(a))
The result of this is always a 2-D array, with a row for
each non-zero element.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> x = np.eye(3)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> np.nonzero(x)
(array([0, 1, 2]), array([0, 1, 2]))
>>> x[np.nonzero(x)]
array([ 1., 1., 1.])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the boolean array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return _wrapfunc(a, 'nonzero')
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
alen
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(2,)
>>> a.shape
(2,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
np.extract: Equivalent method when working on 1-D arrays
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
"""
return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
def clip(a, a_min, a_max, out=None):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : scalar or array_like
Minimum value.
a_max : scalar or array_like
Maximum value. If `a_min` or `a_max` are array_like, then they will
be broadcasted to the shape of `a`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
return _wrapfunc(a, 'clip', a_min, a_max, out=out)
def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a sum is performed on all of the axes
specified in the tuple instead of a single axis or all the axes as
before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The dtype of `a` is used by default unless `a`
has an integer dtype of less precision than the default platform
integer. In that case, if `a` is signed then the platform integer
is used while if `a` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class's `sum` method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
The sum of an empty array is the neutral element 0:
>>> np.sum([])
0.0
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
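# Plain Python generators have no .sum method, so fall back to the builtin sum that was
# saved above as _sum_, honouring an explicit `out` array if one was given.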
if isinstance(a, _gentype):
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
if type(a) is not mu.ndarray:
try:
sum = a.sum
except AttributeError:
pass
else:
return sum(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._sum(a, axis=axis, dtype=dtype,
out=out, **kwargs)
def product(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return um.multiply.reduce(a, axis=axis, dtype=dtype, out=out, **kwargs)
def sometrue(a, axis=None, out=None, keepdims=np._NoValue):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.any(axis=axis, out=out, **kwargs)
def alltrue(a, axis=None, out=None, keepdims=np._NoValue):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.all(axis=axis, out=out, **kwargs)
def any(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether any array element along a given axis evaluates to True.
Returns a single boolean unless `axis` is not ``None``
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (`axis` = `None`) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See `doc.ufuncs` (Section "Output arguments") for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `any` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class's `any` method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
any : bool or ndarray
A new boolean or `ndarray` is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all elements along a given axis evaluate to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity evaluate
to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False], dtype=bool)
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> o=np.array([False])
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array([ True], dtype=bool), array([ True], dtype=bool))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.any(axis=axis, out=out, **kwargs)
def all(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (`axis` = `None`) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section
"Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `all` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class's `all` method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False], dtype=bool)
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> o=np.array([False])
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z # doctest: +SKIP
(28293632, 28293632, array([ True], dtype=bool))
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.all(axis=axis, out=out, **kwargs)
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
diff : Calculate the n-th discrete difference along given axis.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
def cumproduct(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def ptp(a, axis=None, out=None):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for 'peak to peak'.
Parameters
----------
a : array_like
Input values.
axis : int, optional
Axis along which to find the peaks. By default, flatten the
array.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.ptp(x, axis=0)
array([2, 2])
>>> np.ptp(x, axis=1)
array([1, 1])
"""
return _wrapfunc(a, 'ptp', axis=axis, out=out)
def amax(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the maximum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amax` method of sub-classes of
        `ndarray`; however, any non-default value will be. If the
        sub-class's `amax` method does not implement `keepdims`, any
        exceptions will be raised.
Returns
-------
amax : ndarray or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amin :
The minimum value of an array along a given axis, propagating any NaNs.
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
argmax :
Return the indices of the maximum values.
nanmin, minimum, fmin
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding max value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmax.
Don't use `amax` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``amax(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a) # Maximum of the flattened array
3
>>> np.amax(a, axis=0) # Maxima along the first axis
array([2, 3])
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.nanmax(b)
4.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
amax = a.max
except AttributeError:
pass
else:
return amax(axis=axis, out=out, **kwargs)
return _methods._amax(a, axis=axis,
out=out, **kwargs)
def amin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the minimum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amin` method of sub-classes of
        `ndarray`; however, any non-default value will be. If the
        sub-class's `amin` method does not implement `keepdims`, any
        exceptions will be raised.
Returns
-------
amin : ndarray or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amax :
The maximum value of an array along a given axis, propagating any NaNs.
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
argmin :
Return the indices of the minimum values.
nanmax, maximum, fmax
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding min value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmin.
Don't use `amin` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``amin(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.nanmin(b)
0.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
amin = a.min
except AttributeError:
pass
else:
return amin(axis=axis, out=out, **kwargs)
return _methods._amin(a, axis=axis,
out=out, **kwargs)
def alen(a):
"""
Return the length of the first dimension of the input array.
Parameters
----------
a : array_like
Input array.
Returns
-------
alen : int
Length of the first dimension of `a`.
See Also
--------
shape, size
Examples
--------
>>> a = np.zeros((7,4,5))
>>> a.shape[0]
7
>>> np.alen(a)
7
"""
try:
return len(a)
except TypeError:
return len(array(a, ndmin=1))
def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
        `ndarray`; however, any non-default value will be. If the
        sub-class's `prod` method does not implement `keepdims`, any
        exceptions will be raised.
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x) #random
16
The product of an empty array is the neutral element 1:
>>> np.prod([])
1.0
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == np.int
True
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
prod = a.prod
except AttributeError:
pass
else:
return prod(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._prod(a, axis=axis, dtype=dtype,
out=out, **kwargs)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
The cumulative product for each column (i.e., over the rows) of `a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
The cumulative product for each row (i.e. over the columns) of `a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def rank(a):
"""
Return the number of dimensions of an array.
If `a` is not already an array, a conversion is attempted.
Scalars are zero dimensional.
.. note::
This function is deprecated in NumPy 1.9 to avoid confusion with
`numpy.linalg.matrix_rank`. The ``ndim`` attribute or function
should be used instead.
Parameters
----------
a : array_like
Array whose number of dimensions is desired. If `a` is not an array,
a conversion is attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in the array.
See Also
--------
ndim : equivalent function
ndarray.ndim : equivalent property
shape : dimensions of array
ndarray.shape : dimensions of array
Notes
-----
In the old Numeric package, `rank` was the term used for the number of
dimensions, but in NumPy `ndim` is used instead.
Examples
--------
>>> np.rank([1,2,3])
1
>>> np.rank(np.array([[1,2,3],[4,5,6]]))
2
>>> np.rank(1)
0
"""
# 2014-04-12, 1.9
warnings.warn(
"`rank` is deprecated; use the `ndim` attribute or function instead. "
"To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
VisibleDeprecationWarning, stacklevel=2)
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling
by powers of ten.
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
http://www.cs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _wrapfunc(a, 'round', decimals=decimals, out=out)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
Refer to `around` for full documentation.
See Also
--------
around : equivalent function
"""
return around(a, decimals=decimals, out=out)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See `doc.ufuncs` for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `mean` method of sub-classes of
        `ndarray`; however, any non-default value will be. If the
        sub-class's `mean` method does not implement `keepdims`, any
        exceptions will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
std, var, nanmean, nanstd, nanvar
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
Note that for floating-point input, the mean is computed using the
same precision the input has. Depending on the input data, this can
cause the results to be inaccurate, especially for `float32` (see
example below). Specifying a higher-precision accumulator using the
`dtype` keyword can alleviate this issue.
By default, `float16` results are computed using `float32` intermediates
for extra precision.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([ 2., 3.])
>>> np.mean(a, axis=1)
array([ 1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.54999924
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
mean = a.mean
except AttributeError:
pass
else:
return mean(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._mean(a, axis=axis, dtype=dtype,
out=out, **kwargs)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
        `ndarray`; however, any non-default value will be. If the
        sub-class's `std` method does not implement `keepdims`, any
        exceptions will be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as
``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified,
the divisor ``N - ddof`` is used instead. In standard statistical
practice, ``ddof=1`` provides an unbiased estimator of the variance
of the infinite population. ``ddof=0`` provides a maximum likelihood
estimate of the variance for normally distributed variables. The
standard deviation computed in this function is the square root of
the estimated variance, so even with ``ddof=1``, it will not be an
unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949
>>> np.std(a, axis=0)
array([ 1., 1.])
>>> np.std(a, axis=1)
array([ 0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
0.45000005
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925494177
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
std = a.std
except AttributeError:
pass
else:
return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
        the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
        `ndarray`; however, any non-default value will be. If the
        sub-class's `var` method does not implement `keepdims`, any
        exceptions will be raised.
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
See Also
--------
std , mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(abs(x - x.mean())**2)``.
The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
1.25
>>> np.var(a, axis=0)
array([ 1., 1.])
>>> np.var(a, axis=1)
array([ 0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
0.20250003
Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932944759
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
var = a.var
except AttributeError:
pass
else:
return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
| mit |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/userdemo/colormap_normalizations_custom.py | 1 | 2397 | """
==============================
Colormap Normalizations Custom
==============================
Demonstration of using norm to map colormaps onto data in non-linear ways.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.mlab import bivariate_normal
# nodebox section
if __name__ == '__builtin__':
    # we're in NodeBox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
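        # Save the current figure to a temporary PNG, draw it onto the
        # NodeBox canvas below the previous image, then grow the canvas
        # and delete the temporary file.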
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
N = 100
'''
Custom Norm: an example with a customized normalization that maps the
negative data differently from the positive data.
'''
X, Y = np.mgrid[-3:3:complex(0, N), -2:2:complex(0, N)]
Z1 = (bivariate_normal(X, Y, 1., 1., 1.0, 1.0))**2 \
- 0.4 * (bivariate_normal(X, Y, 1.0, 1.0, -1.0, 0.0))**2
Z1 = Z1/0.03
# Example of making your own norm. Also see matplotlib.colors.
# From Joe Kington: This one gives two different linear ramps:
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
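        # Piecewise-linear map: vmin -> 0.0, midpoint -> 0.5, vmax -> 1.0,
        # so the two halves of the colormap are stretched independently.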
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
#####
fig, ax = plt.subplots(2, 1)
pcm = ax[0].pcolormesh(X, Y, Z1,
norm=MidpointNormalize(midpoint=0.),
cmap='RdBu_r')
fig.colorbar(pcm, ax=ax[0], extend='both')
pcm = ax[1].pcolormesh(X, Y, Z1, cmap='RdBu_r', vmin=-np.max(Z1))
fig.colorbar(pcm, ax=ax[1], extend='both')
pltshow(plt)
| mit |
jbrackins/scheduling-research | src/reports.py | 1 | 9850 | import matplotlib.pyplot as plt
import os
import datetime as dt
import dateutil.parser as dparser
import collections
import matplotlib.dates as md
import dateutil
from rec import CourseRecord
LARGE_ROOM_TIER = 60
MEDIUM_ROOM_TIER = 40
class ScheduleReport:
"""Schedule Report class. Output graphs for scheduler...
The ScheduleReport Class
Attributes:
tbd
"""
def __init__(self, yr, sem, data, x_label, y_label):
"""ScheduleReport initialization method.
"""
self.day = {"M": "Monday", "T": "Tuesday", "W": "Wednesday",
"R": "Thursday", "F": "Friday", "S": "Saturday"}
self.path = "../reports/"
self.year = yr
self.semester = sem
self.course_times = self.set_course_times()
self.x_label = x_label
self.y_label = y_label
self.data = data
def get_size_tier(self,size):
tier = None
if size > LARGE_ROOM_TIER:
tier = "L" # Large room
elif size > MEDIUM_ROOM_TIER:
tier = "M" # Medium Room
else:
tier = "S" # Small Room
return tier
def set_course_times(self):
course_times = []
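        # Build one "HH:MM" label per hour from 07:00 through 19:00; the
        # inner range(0, 60, 60) only ever yields minute 0, so exactly one
        # on-the-hour slot is generated per hour.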
for i in range(7, 20):
for j in range(0,60,60):
hr = str(i)
if j < 10:
mn = "0" + str(j)
else:
mn = str(j)
time = dparser.parse(hr+":"+mn)
time = time.strftime('%H:%M')
course_times.append(time)
return course_times
def get_course_times(self):
return self.course_times
def fill_timeline(self,time_line,time_label):
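        # Pad the timeline so every course slot appears on the x-axis,
        # using a zero bar and an empty label for slots with no course.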
for time in self.course_times:
time = str(time)
if time not in time_line:
time_label[time] = ""
time_line[time] = 0
return time_line,time_label
def build_path(self,yr, sem, location, classrooms):
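        # Create reports/<year>/<semester>_<classrooms>_classrooms/<location>/
        # as needed, then restore the original working directory.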
old_dir = os.getcwd()
sem += "_" + classrooms + "_classrooms"
if not os.path.exists(self.path):
os.mkdir(self.path)
os.chdir(self.path)
if not os.path.exists(yr):
os.mkdir(yr)
os.chdir(yr)
if not os.path.exists(sem):
os.mkdir(sem)
os.chdir(sem)
if not os.path.exists(location):
os.mkdir(location)
os.chdir(old_dir)
def valid_label(self,label):
if label == None:
return 0
else:
return int(label)
def get_label(self,subject,course_num,count,capacity):
count = self.valid_label(count)
capacity = self.valid_label(capacity)
label = str(subject) + " "
label += str(course_num) + "\n"
label += "[[" + str(count) + " / "
label += str(capacity) + "]]"
return label
def update_label(self,old_label,subject,course_num,count,capacity):
# Get rid of excess characters in string
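        # Labels look like "SUBJ NUM\n[[count / capacity]]" (see get_label);
        # strip everything but the bracketed pair so the running totals can
        # be accumulated.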
#print(old_label)
old_label = old_label.split("[[")[1].replace("]]","").replace(" ","").split("/")
#print(old_label[1])
# validate labels to make sure they aren't crap
old_count = self.valid_label(old_label[0])
old_capacity = self.valid_label(old_label[1])
count = self.valid_label(count)
capacity = self.valid_label(capacity)
count += old_count
capacity += old_capacity
return self.get_label(subject,course_num,count,capacity)
def generate_plot_seat_percentage(self,course_list,day):
# Generate plot data, as well as labels, etc
plot_data = {}
plot_label = {}
for course, score in course_list:
time = course.rec["START_TIME"]
end = course.rec["END_TIME"]
subject = course.rec["SUBJECT"]
number = course.rec["COURSE_NUM"]
count = course.rec["STUDENT_COUNT"]
capacity = course.rec["SECTION_CAPACITY"]
percent = score.percentage_score
if time in plot_data:
plot_data[time] += percent
# update label
plot_label[time] = self.update_label(plot_label[time],subject,number,count,capacity)
else:
plot_data[time] = percent
#plot_data[time] += percent
# new label
plot_label[time] = self.get_label(subject,number,count,capacity)
# if end in plot_data:
# plot_data[end] += percent
# else:
# plot_data[end] = percent
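        # Convert the utilization fraction to a percentage and cap bars at
        # 110% so heavily over-booked slots do not dominate the plot.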
for key in plot_data:
plot_data[key] *= 100.00
if plot_data[key] > 110.00:
plot_data[key] = 110.00
# sort the plot by time
plot_data,plot_label = self.fill_timeline(plot_data,plot_label)
plot_data = collections.OrderedDict(sorted(plot_data.items()))
plot_label = collections.OrderedDict(sorted(plot_label.items()))
#print(list(plot_label.values()))
#print(plot_data.values())
return plot_data, list(plot_label.values())
def generate_plot_seat_labels(self,eval_data,location,times):
labels = []
for time in times:
course = eval_data.find_course(location, time)
if course != None:
lbl = course.get_rec("SUBJECT") + " "
lbl += str(course.get_rec("COURSE_NUM")) + "\n"
lbl += "(" + str(course.get_rec("STUDENT_COUNT")) + " / "
lbl += str(course.get_rec("SECTION_CAPACITY")) + ")"
else:
# Pass a blank label so we have a correct total
lbl = ""
labels.append( lbl )
return labels
def plot_seat_percentage(self,location,weekday,capacity,score_ind, score_tot,rank,counter,classrooms):
# Plot Title
room_title = "Room Usage Statistics for "
room_title += location + " on " + self.day[weekday] + "s"
room_title += " - " + self.semester + " " + self.year
plt.figure(figsize=(20,10))
# Set up title and axes
plt.title(room_title)
plt.xlabel('Course Time')
y_lab = 'Room Utilization (in percent)'
plt.ylabel(y_lab)
# Prepare plot data
ax=plt.gca()
plot, labels = self.generate_plot_seat_percentage(self.data.get_records(),weekday)
# Prepare message displayed in plot
message = location + " student capacity is " + str(capacity) + " (" + self.get_size_tier(capacity) + " Room)\n"
message += self.day[weekday] + " weighted score for " + location + " is " + str(score_ind) + "\n"
message += "Total weighted score for " + location + " is " + str(score_tot) + "\n"
message += location + "'s Rank is " + str(rank) + " / 10.00"
plt.text(3, 130, message ,
horizontalalignment='right', backgroundcolor='pink')
# Set up the bar graph
ax.grid(zorder=0)
plt.bar(range(len(plot)), plot.values(), align='center',zorder=3)
plt.xticks(range(len(plot)), list(plot.keys()))
plt.axis(ymin = 0, ymax= 150)
plt.xticks(rotation=70)
# Set labels on each bar
rects = ax.patches
for rect, label in zip(rects, labels):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height + 5, label, ha='center', va='bottom')
# Write plot to file
self.build_path(self.year, self.semester, location.replace(" ","_"), classrooms)
plt_file = self.path + self.year + "/" + self.semester + "_" + classrooms + "_classrooms"
plt_file += "/" + location.replace(" ","_") + "/"
plt_file += str(counter) + "_" + self.day[weekday] + "_" + location.replace(" ", "_")
#print(plt_file)
plt.savefig(plt_file, format='pdf', bbox_inches='tight')
plt.clf()
plt.close()
def generate_report(self,good,bad,classrooms,middle):
report_file = self.path + self.year + "/" + self.semester + "_" + classrooms + "_classrooms"
report_file += "/" + "report.txt"
file = open(report_file, 'w')
good_list = sorted(list(good.keys()), key=lambda x: (good[x]['rank'], good[x]['score']))
good_list.reverse()
bad_list = sorted(list(bad.keys()), key=lambda x: (bad[x]['rank'], bad[x]['score']))
bad_list.reverse()
msg = "\n---SCHEDULE EVALUATION REPORT:-------------------------\n"
if classrooms == "large":
msg += "Evaluation of all large rooms on campus\n"
elif classrooms == "all":
msg += "Evaluation of all interest rooms on campus\n"
msg += "Rooms with a ranking >= " + str(middle) + ":\n"
for room in good_list:
msg += "{:12s}".format(room) + " " + "SCORE: "
msg += "{0:.2f}".format(good[room]["score"]) + " " + "RANK: "
msg += "{0:.2f}".format(good[room]["rank"] ) + "\n"
msg += "\n-------------------------------------------------------\n\n"
msg += "Rooms with a ranking < " + str(middle) + ":\n"
for room in bad_list:
msg += "{:12s}".format(room) + " " + "SCORE: "
msg += "{0:.2f}".format(bad[room]["score"]) + " " + "RANK: "
msg += "{0:.2f}".format(bad[room]["rank"] ) + "\n"
msg += "\n-------------------------------------------------------\n\n"
best = good_list[0]
worst = bad_list[-1]
msg += "Best Room Based on Ranking: " + best + "\n"
msg += "Worst Room Based on Ranking: " + worst + "\n"
# write the message to the file and close it. You're done!
file.write(msg)
file.close()
| unlicense |
Edeleon4/PoolShark | Python279/share/doc/networkx-1.9.1/examples/drawing/knuth_miles.py | 36 | 2994 | #!/usr/bin/env python
"""
An example using networkx.Graph().
miles_graph() returns an undirected graph over the 128 US cities from
the datafile miles_dat.txt. The cities each have location and population
data. The edges are labeled with the distance between the two cities.
This example is described in Section 1.1 in Knuth's book [1,2].
References.
-----------
[1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
[2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004-2006 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
def miles_graph():
""" Return the cites example graph in miles_dat.txt
from the Stanford GraphBase.
"""
# open file miles_dat.txt.gz (or miles_dat.txt)
import gzip
fh = gzip.open('knuth_miles.txt.gz','r')
G=nx.Graph()
G.position={}
G.population={}
cities=[]
for line in fh.readlines():
line = line.decode()
if line.startswith("*"): # skip comments
continue
numfind=re.compile("^\d+")
if numfind.match(line): # this line is distances
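            # A distance line lists mileages from the most recently read
            # city to the cities read before it; `i` walks through `cities`
            # (the newest city sits at index 0).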
dist=line.split()
for d in dist:
G.add_edge(city,cities[i],weight=int(d))
i=i+1
else: # this line is a city, position, population
i=1
(city,coordpop)=line.split("[")
cities.insert(0,city)
(coord,pop)=coordpop.split("]")
(y,x)=coord.split(",")
G.add_node(city)
# assign position - flip x axis for matplotlib, shift origin
G.position[city]=(-int(x)+7500,int(y)-3000)
G.population[city]=float(pop)/1000.0
return G
if __name__ == '__main__':
import networkx as nx
import re
import sys
G=miles_graph()
print("Loaded miles_dat.txt containing 128 cities.")
print("digraph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
    # make new graph of cities, edge if less than 300 miles between them
H=nx.Graph()
for v in G:
H.add_node(v)
for (u,v,d) in G.edges(data=True):
if d['weight'] < 300:
H.add_edge(u,v)
# draw with matplotlib/pylab
try:
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
# with nodes colored by degree sized by population
node_color=[float(H.degree(v)) for v in H]
nx.draw(H,G.position,
node_size=[G.population[v] for v in H],
node_color=node_color,
with_labels=False)
# scale the axes equally
plt.xlim(-5000,500)
plt.ylim(-2000,3500)
plt.savefig("knuth_miles.png")
except:
pass
| mit |
costypetrisor/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index size (full scan). The
LSHForest index has a sub-linear scalability profile but can be slower for
small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundreds of dimensions. Higher dimensional datasets tends to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
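        # precision@10: fraction of the approximate neighbors that are also
        # among the exact 10 nearest neighbors.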
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
pv/scikit-learn | sklearn/externals/joblib/parallel.py | 86 | 35087 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This setting was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
    - when used in conjunction with `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
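# A minimal usage sketch (mirroring the Parallel docstring below):
# ``Parallel(n_jobs=2)(delayed(sqrt)(i ** 2) for i in range(10))`` captures
# ``sqrt`` together with its argument so each call can be shipped to a worker.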
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
    It is used for progress reporting, to update the estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
            output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
        - a folder pointed to by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
          constructing lists of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto', temp_folder=None,
max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
# This lock is used to coordinate the main thread of this process with
# the async callback thread of our pool.
self._lock = threading.Lock()
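# Hedged sketch (added comment, not in the original source): the context
# manager API mentioned above lets one pool be reused across several calls,
# e.g.:
#     with Parallel(n_jobs=2) as parallel:
#         roots = parallel(delayed(sqrt)(i ** 2) for i in range(10))
#         squares = parallel(delayed(sqrt)(i ** 4) for i in range(10))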
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
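# Illustrative comment (added, not in the original): on a hypothetical
# 8-core machine mp.cpu_count() == 8, so by the rule above n_jobs=-1
# resolves to 8 workers and n_jobs=-2 to 7, while n_jobs=None (or a missing
# multiprocessing module) falls back to sequential mode with 1 worker.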
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of tasks is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
# Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
# while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
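# Worked example of the auto-tuning above (added comment; assumes, for
# illustration, MIN_IDEAL_BATCH_DURATION = 0.2 and
# MAX_IDEAL_BATCH_DURATION = 2): if the previous batch of size 2 completed
# in 0.01s, ideal_batch_size = int(2 * 0.2 / 0.01) = 40 and the next
# batch_size becomes max(2 * 40, 1) = 80; if a later batch of size 80 then
# takes 3s (> 2s), batch_size is halved back to 40.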
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
# This is heuristic code to print a message only 'verbose' times
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
# we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
# The iterable was consumed all at once by the above while loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
| bsd-3-clause |
degoldschmidt/pytrack-analysis | pytrack_analysis/geometry.py | 1 | 16281 | import numpy as np
import pandas as pd
import platform, subprocess
import os, sys
import os.path as op
import cv2
from io import StringIO
from pytrack_analysis.cli import colorprint, flprint, prn
from pytrack_analysis.image_processing import VideoCaptureAsync, VideoCapture, match_templates, get_peak_matches, preview
from pytrack_analysis.yamlio import read_yaml, write_yaml
NAMESPACE = 'geometry'
"""
Returns angle between two given points, centered on pt1 (GEOMETRY)
"""
def get_angle(pt1, pt2, flipy=False):
dx = pt2[0]-pt1[0]
if flipy:
dy = pt1[1]-pt2[1]
else:
dy = pt2[1]-pt1[1]
return np.arctan2(dy,dx)
"""
Returns distance between two given points (GEOMETRY)
"""
def get_distance(pt1, pt2):
dx = pt1[0]-pt2[0]
dy = pt1[1]-pt2[1]
return np.sqrt(dx**2 + dy**2)
"""
Returns rotation matrix for given angle in two dimensions (GEOMETRY)
"""
def rot(angle, in_degrees=False):
if in_degrees:
rads = np.radians(angle)
return np.array([[np.cos(rads), -np.sin(rads)],[np.sin(rads), np.cos(rads)]])
else:
return np.array([[np.cos(angle), -np.sin(angle)],[np.sin(angle), np.cos(angle)]])
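# Illustrative usage of the helpers above (added comment, values rounded):
# get_angle((0., 0.), (1., 1.))                 # ~0.785 rad (pi/4)
# get_angle((0., 0.), (1., 1.), flipy=True)     # ~-0.785 rad (y-axis flipped)
# get_distance((0., 0.), (3., 4.))              # 5.0
# rot(90., in_degrees=True).dot([1., 0.])       # ~[0., 1.]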
"""
Returns rotated vector for given angle in two dimensions (GEOMETRY)
"""
"""
def rot(angle, in_degrees=False):
if in_degrees:
rads = np.radians(angle)
return np.array([[np.cos(rads), -np.sin(rads)],[np.sin(rads), np.cos(rads)]])
else:
return np.array([[np.cos(angle), -np.sin(angle)],[np.sin(angle), np.cos(angle)]])
"""
class Arena(object):
def __init__(self, x, y, scale, c, layout=None):
self.x = x
self.y = y
self.r = scale * 25.
self.r0 = scale * .1
self.scale = scale
self.rotation = 0.
self.substr = ['yeast', 'sucrose']
self.spots = self.get_rings((scale, 10., 0., np.pi/3., 0), (scale, 20., np.pi/6., np.pi/3., 1))
def get_dict(self):
return {'x': self.x, 'y': self.y, 'radius': self.r, 'scale': self.scale, 'rotation': self.rotation}
def move_to(self, x, y):
dx = x - self.x
dy = y - self.y
self.x = x
self.y = y
for spot in self.spots:
spot.move_by(dx, dy)
def rotate_by(self, value):
self.rotation += value
self.rotation = round(self.rotation, 2)
self.spots = self.get_rings((self.scale, 10., 0.+self.rotation, np.pi/3., 0), (self.scale, 20., np.pi/6.+self.rotation, np.pi/3., 1))
def scale_by(self, value):
self.scale += value
self.scale = round(self.scale, 5)
if self.scale < 0.0:
self.scale = 0.00
self.r = self.scale * 25.
self.r0 = self.scale * .5
self.spots = self.get_rings((self.scale, 10., 0.+self.rotation, np.pi/3., 0), (self.scale, 20., np.pi/6.+self.rotation, np.pi/3., 1))
def get_rings(self, *args):
"""
takes tuples: (scale, distance, offset, interval, substrate_move)
"""
out = []
for arg in args:
sc = arg[0]
r = sc * arg[1]
t = arg[2]
w = arg[3]
sm = arg[4]
angles = np.arange(t, t+2*np.pi, w)
xs, ys = r * np.cos(angles), r * np.sin(angles)
for i, (x, y) in enumerate(zip(xs, ys)):
out.append(Spot(x+self.x, y+self.y, sc * 1.5, self.substr[(i+sm)%2]))
return out
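# Illustrative note (added comment): with the call in __init__ above,
# get_rings((scale, 10., 0., np.pi/3., 0), (scale, 20., np.pi/6., np.pi/3., 1))
# builds an inner ring of 6 spots at radius 10*scale starting at angle 0 and
# an outer ring of 6 spots at radius 20*scale offset by pi/6; the last tuple
# element shifts the yeast/sucrose alternation so the two rings interleave
# substrates.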
class Spot(object):
def __init__(self, x, y, r, s):
self.x = x
self.y = y
self.r = r
self.substrate = s
def move_by(self, dx, dy):
self.x += dx
self.y += dy
def move_to(self, x, y):
self.x = x
self.y = y
def toggle_substrate(self):
s = {'yeast': 'sucrose', 'sucrose': 'yeast'}
self.substrate = s[self.substrate]
def detect_geometry(_fullpath, _timestr, onlyIm=False):
setup = op.basename(_fullpath).split('_')[0]
video = VideoCapture(_fullpath, 0)
dir = op.dirname(_fullpath)
if not op.isdir(op.join(dir, 'pytrack_res', 'arena')):
os.mkdir(op.join(dir, 'pytrack_res', 'arena'))
outfile = op.join(dir, 'pytrack_res','arena', setup+'_arena_' +_timestr+'.yaml')
img = video.get_average(100) #### takes average of 100 frames
video.stop()
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
### this is for putting more templates
if not op.isdir(op.join(dir, 'pytrack_res', 'templates')):
os.mkdir(op.join(dir, 'pytrack_res', 'templates'))
if not op.isdir(op.join(dir, 'pytrack_res', 'templates', setup)):
os.mkdir(op.join(dir, 'pytrack_res', 'templates', setup))
os.mkdir(op.join(dir, 'pytrack_res', 'templates', setup, 'temp'))
cv2.imwrite(op.join(dir, 'pytrack_res', 'templates', setup, 'temp', setup+'_'+_timestr+'.png'),img)
if onlyIm:
return None
"""
Get arenas
"""
thresh = 0.9
arenas = []
while len(arenas) != 4:
img_rgb = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB) ### this is for coloring
### Template matching function
loc, vals, w = match_templates(img, 'arena', setup, thresh, dir=dir)
patches = get_peak_matches(loc, vals, w, img_rgb)
arenas = patches
### Required to have 4 arenas detected, lower threshold if not matched
if len(arenas) < 4:
thresh -= 0.05
thresh = round(thresh,2)
print('Not enough arenas detected. Decrease matching threshold to {}.'.format(thresh))
elif len(arenas) == 4:
print('Detected 4 arenas. Exiting arena detection.')
for pt in arenas:
ept = (int(round(pt[0]+w/2)), int(round(pt[1]+w/2)))
cv2.circle(img_rgb, ept, int(w/2), (0,255,0), 1)
cv2.circle(img_rgb, ept, 1, (0,255,0), 2)
#preview(img_rgb)
""" Geometry correction algorithm (not used)
for i, arena in enumerate(arenas):
print('(x, y) = ({}, {})'.format(arena[0], arena[1]))
for i, j in zip([0, 1, 3, 2], [1, 3, 2, 0]):
print('distance ({}-{}) = {}'.format(i, j, get_distance(arenas[i], arenas[j])))
print('Angle: {}'.format(get_angle(arenas[i], arenas[j], flipy=True)))
for i, j, k in zip([3, 2, 1, 0], [1, 0, 0, 1], [2, 3, 3, 2]): ### i is anchor
for m in range(4):
if m not in [i, j, k]:
print(m)
da = [arenas[j][0]-arenas[i][0], arenas[j][1]-arenas[i][1]]
pta = [arenas[k][0]+da[0], arenas[k][1]+da[1]]
db = [arenas[k][0]-arenas[i][0], arenas[k][1]-arenas[i][1]]
ptb = [arenas[j][0]+db[0], arenas[j][1]+db[1]]
if get_distance(pta, ptb) < 5:
pt = (int((pta[0]+ptb[0])/2 + w/2), int((pta[1]+ptb[1])/2 + w/2))
cv2.circle(img_rgb, pt, int(w/2), (255,0,255), 1)
cv2.circle(img_rgb, pt, 1, (255,0,255), 2)
"""
preview(img_rgb, title='Preview arena', topleft='Threshold: {}'.format(thresh), hold=True, write=True, writeto=op.join(dir, 'pytrack_res','arena', '{}_arena.jpg'.format(_timestr)))
"""
Get spots
"""
labels = ['topleft', 'topright', 'bottomleft', 'bottomright']
geometry = {}
for ia, arena in enumerate(arenas):
arena_img = img[arena[1]:arena[1]+w, arena[0]:arena[0]+w]
c_arena = (arena[0]+w/2, arena[1]+w/2)
if c_arena[1]<700:
label='top'
else:
label='bottom'
if c_arena[0]<700:
label += 'left'
else:
label += 'right'
spots = []
thresh = 0.99
min_spots = 6
max_spots = 8
while len(spots) < min_spots:
img_rgb = cv2.cvtColor(arena_img,cv2.COLOR_GRAY2RGB) ### this is for coloring
### Template matching function
loc, vals, ws = match_templates(arena_img, 'yeast', setup, thresh, dir=dir)
patches = get_peak_matches(loc, vals, ws, img_rgb, arena=arena)
spots = patches
### Required to have 6 yeast spots detected, lower threshold if not matched
"""
elif len(spots) > max_spots:
thresh += 0.001
thresh = round(thresh,3)
print('Too many yeast spots detected. Increase matching threshold to {}.'.format(thresh))
"""
if len(spots) < min_spots:
thresh -= 0.005
thresh = round(thresh,3)
print('Not enough yeast spots detected. Decrease matching threshold to {}.'.format(thresh))
else:
#print('Detected 6 yeast spots. Exiting spot detection.')
spotdf = {'x': [], 'y': [], 'angle': [], 'distance': [], 'orientation': []}
inner, outer = [], []
for pt in spots:
ept = (int(round(pt[0]+ws/2)), int(round(pt[1]+ws/2)))
rx, ry = pt[0]+arena[0]+ws/2, pt[1]+arena[1]+ws/2
r = 10 * (w/50.)
dist = get_distance([rx, ry], c_arena)
angle = get_angle([c_arena[0], -c_arena[1]], [rx, -ry])
if angle < 0:
angle = 2*np.pi+angle
orientation = angle%(np.pi/6)
if orientation > np.pi/12:
orientation = orientation - np.pi/6
if dist > 1.5*r:
outer.append((rx, ry))
else:
inner.append((rx, ry))
spotdf['x'].append(rx-c_arena[0])
spotdf['y'].append(-(ry-c_arena[1]))
spotdf['distance'].append(dist)
spotdf['angle'].append(angle)
spotdf['orientation'].append(orientation)
cv2.circle(img_rgb, ept, int(ws/2), (255,0,255), 1)
cv2.circle(img_rgb, ept, 1, (255,0,255), 1)
inner_est, outer_est = None, None
if len(inner) == 3:
inner_est = (int(round(sum([inn[0] for inn in inner])/len(inner)-arena[0])), int(round(sum([inn[1] for inn in inner])/len(inner)-arena[1])))
if len(outer) == 3:
outer_est = (int(round(sum([out[0] for out in outer])/len(outer)-arena[0])), int(round(sum([out[1] for out in outer])/len(outer)-arena[1])))
if inner_est is None and outer_est is None:
mean_est = (int(round(w/2)), int(round(w/2)))
elif inner_est is None:
mean_est = outer_est
elif outer_est is None:
mean_est = inner_est
else:
mean_est = ( int(round(inner_est[0]+outer_est[0])/2), int(round(inner_est[1]+outer_est[1])/2))
cv2.circle(img_rgb, mean_est, 1, (0,255,0), 1)
spotdf = pd.DataFrame(spotdf)
mean_orient = spotdf['orientation'].mean()
correct_spots = {'x': [], 'y': [], 's': []}
for i, angle in enumerate(np.arange(mean_orient+np.pi/3,2*np.pi+mean_orient+np.pi/3, np.pi/3)):
for j in range(2):
x, y, s = (j+1)*r * np.cos(angle+j*np.pi/6), (j+1) *r*np.sin(angle+j*np.pi/6), i%2
correct_spots['x'].append(x)
correct_spots['y'].append(y)
if s == 0:
correct_spots['s'].append('yeast')
else:
correct_spots['s'].append('sucrose')
correct_spots = pd.DataFrame(correct_spots)
all_spots = []
for index, row in correct_spots.iterrows():
if row['s'] == 'yeast':
color = (0,165,255)
else:
color = (255, 144, 30)
x, y = row['x']+mean_est[0], -row['y']+mean_est[1]
scale = w/50.
all_spots.append({'x': row['x']/scale, 'y': row['y']/scale, 'r': 1.5, 'substr': row['s']})
cv2.circle(img_rgb, (int(x), int(y)), int(ws/2), color, 1)
cv2.circle(img_rgb, (int(x), int(y)), 1, color, 1)
geometry['fly{:02}'.format(ia+1)] = { 'arena': {'radius': w/2, 'outer': 260.0, 'scale': w/50., 'x': float(mean_est[0]+arena[0]), 'y': float(mean_est[1]+arena[1]), 'name': label}, 'food_spots': all_spots}
preview(img_rgb, title='Preview spots', topleft='Arena: {}, threshold: {}'.format(label, thresh), hold=True, write=True, writeto=op.join(dir, 'pytrack_res','arena', '{}_fly{}.jpg'.format(_timestr, ia)))
print('save geometry to {}'.format(outfile))
write_yaml(outfile, geometry)
return geometry
def manual_geometry(_fullpath, _timestr):
setup = op.basename(_fullpath).split('_')[0]
video = VideoCapture(_fullpath, 0)
dir = op.dirname(_fullpath)
if not op.isdir(op.join(dir, 'pytrack_res', 'arena')):
os.mkdir(op.join(dir, 'pytrack_res', 'arena'))
outfile = op.join(dir, 'pytrack_res','arena', setup+'_arena_' +_timestr+'_manual.yaml')
img = video.get_average(100) #### takes average of 100 frames
video.stop()
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
"""
Manual entry
"""
centers = []
widths = []
angles = []
for fly in range(4):
print('Fly {}:'.format(fly+1))
pts = []
for each in ['first', 'second', 'third']:
pts.append(input('Type coordinates of {} inner yeast spot: '.format(each)))
if pts[-1] == '':
pass
else:
pts = [each.split(' ') for each in pts]
pts = [(int(el[0]), int(el[1])) for el in pts]
centers.append((sum([el[0] for el in pts])/3, sum([el[1] for el in pts])/3))
centers[-1] = (int(round(centers[-1][0])), int(round(centers[-1][1])))
r = get_distance(pts[0],centers[-1])
widths.append(5*r)
angles.append(np.arctan2(-(pts[0][1]-centers[-1][1]), pts[0][0]-centers[-1][0]))
"""
Get arenas
"""
img_rgb = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB) ### this is for coloring
for pt, w in zip(centers, widths):
#for ept in pts:
# cv2.circle(img_rgb, ept, 1, (255,255,0), 2)
cv2.circle(img_rgb, pt, int(w/2), (0,255,0), 1)
cv2.circle(img_rgb, pt, 1, (0,255,0), 2)
preview(img_rgb, title='Preview arena', topleft='manual', hold=True)
"""
Get spots
"""
labels = ['topleft', 'topright', 'bottomleft', 'bottomright']
geometry = {}
ia = 0
for pt, w, a in zip(centers, widths, angles):
correct_spots = {'x': [], 'y': [], 's': []}
for i, angle in enumerate(np.arange(a, 2*np.pi+a, np.pi/3)):
for j in range(2):
r = w/5.
x, y, s = (j+1)*r * np.cos(angle+j*np.pi/6), (j+1)*r*np.sin(angle+j*np.pi/6), i%2
correct_spots['x'].append(x)
correct_spots['y'].append(y)
if s == 0:
correct_spots['s'].append('yeast')
else:
correct_spots['s'].append('sucrose')
correct_spots = pd.DataFrame(correct_spots)
all_spots = []
for index, row in correct_spots.iterrows():
if row['s'] == 'yeast':
color = (0,165,255)
else:
color = (255, 144, 30)
x, y = row['x']+pt[0], -row['y']+pt[1]
scale = w/50.
all_spots.append({'x': float(row['x']/scale), 'y': float(row['y']/scale), 'r': 1.5, 'substr': row['s']})
cv2.circle(img_rgb, (int(round(x)), int(round(y))), int(0.15*r), color, 1)
cv2.circle(img_rgb, (int(round(x)), int(round(y))), 1, color, 1)
geometry['fly{:02}'.format(ia+1)] = { 'arena': {'radius': float(w/2), 'outer': 260.0, 'scale': float(w/50.), 'x': float(pt[0]), 'y': float(pt[1]), 'name': labels[ia]}, 'food_spots': all_spots}
arena_img = img_rgb[pt[1]-int(w/2):pt[1]+int(w/2), pt[0]-int(w/2):pt[0]+int(w/2)]
preview(arena_img, title='Preview spots', topleft='Arena: {}'.format(labels[ia]), hold=True)
ia += 1
print('save geometry to {}'.format(outfile))
write_yaml(outfile, geometry)
return geometry
| gpl-3.0 |
ljwolf/pysal_core | libpysal/io/geotable/dbf.py | 2 | 6681 | """miscellaneous file manipulation utilities
"""
import numpy as np
import pandas as pd
from ..FileIO import FileIO as ps_open
def check_dups(li):
"""checks duplicates in list of ID values
ID values must be read in as a list
__author__ = "Luc Anselin <[email protected]> "
Arguments
---------
li : list of ID values
Returns
-------
a list with the duplicate IDs
"""
return list(set([x for x in li if li.count(x) > 1]))
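# Example (added comment; result order is not guaranteed because a set is
# used internally):
# >>> sorted(check_dups([1, 2, 2, 3, 3, 3]))
# [2, 3]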
def dbfdups(dbfpath,idvar):
"""checks duplicates in a dBase file
ID variable must be specified correctly
__author__ = "Luc Anselin <[email protected]> "
Arguments
---------
dbfpath : file path to dBase file
idvar : ID variable in dBase file
Returns
-------
a list with the duplicate IDs
"""
db = ps_open(dbfpath,'r')
li = db.by_col(idvar)
return list(set([x for x in li if li.count(x) > 1]))
def df2dbf(df, dbf_path, my_specs=None):
'''
Convert a pandas.DataFrame into a dbf.
__author__ = "Dani Arribas-Bel <[email protected]>, Luc Anselin <[email protected]>"
...
Arguments
---------
df : DataFrame
Pandas dataframe object to be entirely written out to a dbf
dbf_path : str
Path to the output dbf. It is also returned by the function
my_specs : list
List with the field_specs to use for each column.
Defaults to None and applies the following scheme:
* int: ('N', 14, 0) - for all ints
* float: ('N', 14, 14) - for all floats
* str: ('C', 14, 0) - for string, object and category
with all variants for different type sizes
Note: use of dtypes.name may not be fully robust, but the preferred approach of using
isinstance seems too clumsy
'''
if my_specs:
specs = my_specs
else:
"""
type2spec = {int: ('N', 20, 0),
np.int64: ('N', 20, 0),
np.int32: ('N', 20, 0),
np.int16: ('N', 20, 0),
np.int8: ('N', 20, 0),
float: ('N', 36, 15),
np.float64: ('N', 36, 15),
np.float32: ('N', 36, 15),
str: ('C', 14, 0)
}
types = [type(df[i].iloc[0]) for i in df.columns]
"""
# new approach using dtypes.name to avoid numpy name issue in type
type2spec = {'int': ('N', 20, 0),
'int8': ('N', 20, 0),
'int16': ('N', 20, 0),
'int32': ('N', 20, 0),
'int64': ('N', 20, 0),
'float': ('N', 36, 15),
'float32': ('N', 36, 15),
'float64': ('N', 36, 15),
'str': ('C', 14, 0),
'object': ('C', 14, 0),
'category': ('C', 14, 0)
}
types = [df[i].dtypes.name for i in df.columns]
specs = [type2spec[t] for t in types]
db = ps_open(dbf_path, 'w')
db.header = list(df.columns)
db.field_spec = specs
for i, row in df.T.iteritems():
db.write(row)
db.close()
return dbf_path
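# Hedged usage sketch (added comment; 'point_id', 'value' and the output
# path are made-up names):
# >>> df = pd.DataFrame({'point_id': [1, 2, 3], 'value': [0.5, 1.5, 2.5]})
# >>> df2dbf(df, 'points.dbf')  # returns 'points.dbf'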
def dbf2df(dbf_path, index=None, cols=False, incl_index=False):
'''
Read a dbf file as a pandas.DataFrame, optionally selecting the index
variable and which columns are to be loaded.
__author__ = "Dani Arribas-Bel <[email protected]> "
...
Arguments
---------
dbf_path : str
Path to the DBF file to be read
index : str
Name of the column to be used as the index of the DataFrame
cols : list
List with the names of the columns to be read into the
DataFrame. Defaults to False, which reads the whole dbf
incl_index : Boolean
If True index is included in the DataFrame as a
column too. Defaults to False
Returns
-------
df : DataFrame
pandas.DataFrame object created
'''
db = ps_open(dbf_path)
if cols:
if incl_index:
cols.append(index)
vars_to_read = cols
else:
vars_to_read = db.header
data = dict([(var, db.by_col(var)) for var in vars_to_read])
if index:
index = db.by_col(index)
db.close()
return pd.DataFrame(data, index=index, columns=vars_to_read)
else:
db.close()
return pd.DataFrame(data,columns=vars_to_read)
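# Hedged usage sketch (added comment; file and column names are made up):
# >>> df = dbf2df('points.dbf', index='point_id', cols=['value'],
# ...             incl_index=True)
# >>> list(df.columns)  # ['value', 'point_id']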
def dbfjoin(dbf1_path,dbf2_path,out_path,joinkey1,joinkey2):
'''
Wrapper function to merge two dbf files into a new dbf file.
__author__ = "Luc Anselin <[email protected]> "
Uses dbf2df and df2dbf to read and write the dbf files into a pandas
DataFrame. Uses all default settings for dbf2df and df2dbf (see docs
for specifics).
...
Arguments
---------
dbf1_path : str
Path to the first (left) dbf file
dbf2_path : str
Path to the second (right) dbf file
out_path : str
Path to the output dbf file (returned by the function)
joinkey1 : str
Variable name for the key in the first dbf. Must be specified.
Key must take unique values.
joinkey2 : str
Variable name for the key in the second dbf. Must be specified.
Key must take unique values.
Returns
-------
dbfpath : path to output file
'''
df1 = dbf2df(dbf1_path,index=joinkey1)
df2 = dbf2df(dbf2_path,index=joinkey2)
dfbig = pd.merge(df1,df2,left_on=joinkey1,right_on=joinkey2,sort=False)
dp = df2dbf(dfbig,out_path)
return dp
def dta2dbf(dta_path,dbf_path):
"""
Wrapper function to convert a stata dta file into a dbf file.
__author__ = "Luc Anselin <[email protected]> "
Uses df2dbf to write the dbf files from a pandas
DataFrame. Uses all default settings for df2dbf (see docs
for specifics).
...
Arguments
---------
dta_path : str
Path to the Stata dta file
dbf_path : str
Path to the output dbf file
Returns
-------
dbf_path : path to output file
"""
db = pd.read_stata(dta_path)
dp = df2dbf(db,dbf_path)
return dp
| bsd-3-clause |
perryjohnson/biplaneblade | sandia_blade_lib/prep_stn23_mesh.py | 1 | 24693 | """Write initial TrueGrid files for one Sandia blade station.
Usage
-----
start an IPython (qt)console with the pylab flag:
$ ipython qtconsole --pylab
or
$ ipython --pylab
Then, from the prompt, run this script:
|> %run sandia_blade_lib/prep_stnXX_mesh.py
or
|> import sandia_blade_lib/prep_stnXX_mesh
Author: Perry Roth-Johnson
Last updated: April 10, 2014
"""
import matplotlib.pyplot as plt
import lib.blade as bl
import lib.poly_utils as pu
from shapely.geometry import Polygon
# SET THESE PARAMETERS -----------------
station_num = 23
# --------------------------------------
plt.close('all')
# load the Sandia blade
m = bl.MonoplaneBlade('Sandia blade SNL100-00', 'sandia_blade')
# pre-process the station dimensions
station = m.list_of_stations[station_num-1]
station.airfoil.create_polygon()
station.structure.create_all_layers()
station.structure.save_all_layer_edges()
station.structure.write_all_part_polygons()
# plot the parts
station.plot_parts()
# access the structure for this station
st = station.structure
# upper spar cap -----------------------------------------------------------
label = 'upper spar cap'
# create the bounding polygon
usc = st.spar_cap.layer['upper']
is2 = st.internal_surface_2.layer['resin']
points_usc = [
(-0.75, usc.left[0][1]), # SparCap_upper.txt
(-0.74000000, 0.51408849), # InternalSurface2_resin.txt
( 0.74000000, 0.51291488), # InternalSurface2_resin.txt
( 0.75, usc.right[1][1]), # SparCap_upper.txt
( 0.75, 0.9),
(-0.75, 0.9)
]
bounding_polygon = Polygon(points_usc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'triax', label,
bounding_polygon)
# lower spar cap -----------------------------------------------------------
label = 'lower spar cap'
# create the bounding polygon
lsc = st.spar_cap.layer['lower']
points_lsc = [
(-0.75,-0.9),
( 0.75,-0.9),
(0.75000000, lsc.right[0][1]), # SparCap_lower.txt
(0.74000000, -0.24804750), # InternalSurface2_resin.txt
(-0.74000000, -0.35849733), # InternalSurface2_resin.txt
(-0.75000000, lsc.left[1][1]) # SparCap_lower.txt
]
bounding_polygon = Polygon(points_lsc)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'triax', label,
bounding_polygon)
# TE reinforcement, upper 1 ------------------------------------------------
label = 'TE reinforcement, upper 1'
# create the bounding polygon
ter = st.TE_reinforcement.layer['foam']
is4 = st.internal_surface_4.layer['resin']
points_teu1 = [
(ter.top[0][0], 0.35), # TE_Reinforcement_foam.txt
tuple(ter.top[0]), # TE_Reinforcement_foam.txt
is4.polygon.interiors[0].coords[564-214], # InternalSurface4_resin.txt
(2.7, 0.1),
is4.polygon.interiors[0].coords[556-214], # InternalSurface4_resin.txt
(3.14015958, 0.35) # InternalSurface4_resin.txt
]
bounding_polygon = Polygon(points_teu1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 1 ------------------------------------------------
label = 'TE reinforcement, lower 1'
# create the bounding polygon
points_tel1 = [
(ter.bottom[0][0], -0.1), # TE_Reinforcement_foam.txt
tuple(ter.bottom[1]), # TE_Reinforcement_foam.txt
is4.polygon.interiors[0].coords[331-214], # InternalSurface4_resin.txt
points_teu1[-3],
points_teu1[-2], # InternalSurface4_resin.txt
(points_teu1[-1][0], -0.1) # InternalSurface4_resin.txt
]
bounding_polygon = Polygon(points_tel1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, upper 2 ------------------------------------------------
label = 'TE reinforcement, upper 2'
# create the bounding polygon
is4t = st.internal_surface_4.layer['triax']
points_teu2 = [
points_teu1[-1],
points_teu1[-2],
is4t.polygon.interiors[0].coords[284-112], # InternalSurface4_triax.txt
is4t.polygon.exterior.coords[20-3], # InternalSurface4_triax.txt
(is4t.polygon.exterior.coords[20-3][0], 0.35) # InternalSurface4_triax.txt
]
bounding_polygon = Polygon(points_teu2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 2 ------------------------------------------------
label = 'TE reinforcement, lower 2'
# create the bounding polygon
points_tel2 = [
(points_teu2[0][0], -0.1),
points_teu2[1],
points_teu2[2],
points_teu2[3],
(points_teu2[3][0], -0.1)
]
bounding_polygon = Polygon(points_tel2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, upper 3 ------------------------------------------------
label = 'TE reinforcement, upper 3'
# create the bounding polygon
points_teu3 = [
points_teu2[-1],
points_teu2[-2],
(ter.polygon.exterior.coords[0][0], 0.0022), # TE_Reinforcement_foam.txt
(ter.polygon.exterior.coords[0][0], 0.35) # TE_Reinforcement_foam.txt
]
bounding_polygon = Polygon(points_teu3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 3 ------------------------------------------------
label = 'TE reinforcement, lower 3'
# create the bounding polygon
points_tel3 = [
(points_teu3[0][0], -0.1),
points_teu3[1],
points_teu3[2],
(points_teu3[2][0], -0.1)
]
bounding_polygon = Polygon(points_tel3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'foam', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, upper 4 ------------------------------------------------
label = 'TE reinforcement, upper 4'
# create the bounding polygon
es = st.external_surface.layer['gelcoat']
teru = st.TE_reinforcement.layer['uniax']
points_teu4 = [
points_teu3[-1],
points_teu3[-2],
(teru.polygon.exterior.coords[-2][0], 0.0022), # TE_Reinforcement_uniax.txt
teru.polygon.exterior.coords[-2], # TE_Reinforcement_uniax.txt
es.polygon.exterior.coords[-2],
(3.4, 0.35) # TE_Reinforcement_uniax.txt
]
bounding_polygon = Polygon(points_teu4)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# TE reinforcement, lower 4 ------------------------------------------------
label = 'TE reinforcement, lower 4'
# create the bounding polygon
points_tel4 = [
(points_teu4[0][0], -0.1),
points_teu4[1],
points_teu4[2],
teru.polygon.exterior.coords[-1], # TE_Reinforcement_uniax.txt
es.polygon.exterior.coords[-1],
(points_teu4[2][0], -0.1)
]
bounding_polygon = Polygon(points_tel4)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.TE_reinforcement, 'uniax', label,
bounding_polygon)
# LE panel -----------------------------------------------------------------
label = 'LE panel'
# create the bounding polygon
lep = st.LE_panel.layer['foam']
is1 = st.internal_surface_1.layer['resin']
points_le = [
(-3.00,-1.6),
(-0.836,-1.6),
tuple(lep.bottom[0]), # LE_Panel_foam.txt
is1.polygon.interiors[0].coords[-2], # InternalSurface1_resin.txt
(-1.5, 0.0),
is1.polygon.interiors[0].coords[-1], # InternalSurface1_resin.txt
tuple(lep.top[1]), # LE_Panel_foam.txt
(-0.836, 1.3),
(-3.00, 1.3)
]
bounding_polygon = Polygon(points_le)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# upper aft panel 1 -------------------------------------------------------
label = 'upper aft panel 1'
# create the bounding polygon
ap1u = st.aft_panel_1.layer['upper']
is3 = st.internal_surface_3.layer['resin']
points_ap1u = [
(0.836, 1.3),
(ap1u.right[1][0], 1.3), # AftPanel1_upper.txt
tuple(ap1u.right[1]), # AftPanel1_upper.txt
(1.64218500, 0.36594453), # InternalSurface3_resin.txt
(1.2, 0.3),
is3.polygon.interiors[0].coords[-2], # InternalSurface3_resin.txt
tuple(ap1u.left[0]) # AftPanel1_upper.txt
]
bounding_polygon = Polygon(points_ap1u)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'triax', label,
bounding_polygon)
# lower aft panel 1 -------------------------------------------------------
label = 'lower aft panel 1'
# create the bounding polygon
ap1l = st.aft_panel_1.layer['lower']
points_ap1l = [
(0.836, -1.6),
(ap1l.right[0][0], -1.6), # AftPanel1_lower.txt
tuple(ap1l.right[0]), # AftPanel1_lower.txt
(1.64218500, -0.05972683), # InternalSurface3_resin.txt
(1.2, 0.0),
is3.polygon.interiors[0].coords[-1], # InternalSurface3_resin.txt
tuple(ap1l.left[1]) # AftPanel1_lower.txt
]
bounding_polygon = Polygon(points_ap1l)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'triax', label,
bounding_polygon)
# upper aft panel 2 -------------------------------------------------------
label = 'upper aft panel 2'
# create the bounding polygon
ap2u = st.aft_panel_2.layer['upper']
sw3br = st.shear_web_3.layer['biax, right']
points_ap2u = [
(sw3br.right[0][0], 1.3),
(ap2u.right[1][0], 1.3), # AftPanel2_upper.txt
tuple(ap2u.right[1]), # AftPanel2_upper.txt
(ap2u.right[1][0], 0.14),
(2.0, 0.1),
is4.polygon.interiors[0].coords[-2], # InternalSurface4_resin.txt
tuple(ap2u.left[0]) # AftPanel2_upper.txt
]
bounding_polygon = Polygon(points_ap2u)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
# lower aft panel 2 -------------------------------------------------------
label = 'lower aft panel 2'
# create the bounding polygon
ap2l = st.aft_panel_2.layer['lower']
is4 = st.internal_surface_4.layer['resin']
sw3br = st.shear_web_3.layer['biax, right']
points_ap2l = [
(sw3br.right[0][0], -1.6),
(ap2l.right[0][0], -1.6), # AftPanel2_lower.txt
tuple(ap2l.right[0]), # AftPanel2_lower.txt
points_ap2u[-4],
points_ap2u[-3],
is4.polygon.interiors[0].coords[-1], # InternalSurface4_resin.txt
tuple(ap2l.left[1]) # AftPanel2_lower.txt
]
bounding_polygon = Polygon(points_ap2l)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
# above shear web 1 ----------------------------------------------------------
label = 'above shear web 1'
# create the bounding polygon
points_asw1 = [
(-0.75, 2.1),
(-0.75, 0.1),
(-0.836, 0.1),
(-0.836, 2.1)
]
bounding_polygon = Polygon(points_asw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
# below shear web 1 ----------------------------------------------------------
label = 'below shear web 1'
# create the bounding polygon
points_bsw1 = [
(-0.75, -2.1),
(-0.75, -0.1),
(-0.836, -0.1),
(-0.836, -2.1)
]
bounding_polygon = Polygon(points_bsw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
# above shear web 2 ----------------------------------------------------------
label = 'above shear web 2'
# create the bounding polygon
points_asw2 = [
(0.75, 2.1),
(0.75, 0.1),
(0.836, 0.1),
(0.836, 2.1)
]
bounding_polygon = Polygon(points_asw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
# below shear web 2 ----------------------------------------------------------
label = 'below shear web 2'
# create the bounding polygon
points_bsw2 = [
(0.75, -2.1),
(0.75, -0.1),
(0.836, -0.1),
(0.836, -2.1)
]
bounding_polygon = Polygon(points_bsw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
# above shear web 3 ----------------------------------------------------------
label = 'above shear web 3'
# create the bounding polygon
sw3bl = st.shear_web_3.layer['biax, left']
points_asw3 = [
(sw3bl.left[0][0], 1.0),
(sw3bl.left[0][0], 0.1),
(sw3br.right[0][0], 0.1),
(sw3br.right[0][0], 1.0)
]
bounding_polygon = Polygon(points_asw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
# below shear web 3 ----------------------------------------------------------
label = 'below shear web 3'
# create the bounding polygon
points_bsw3 = [
(sw3bl.left[0][0], -1.0),
(sw3bl.left[0][0], -0.1),
(sw3br.right[0][0], -0.1),
(sw3br.right[0][0], -1.0)
]
bounding_polygon = Polygon(points_bsw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.external_surface, 'triax', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.external_surface, 'gelcoat', label,
bounding_polygon)
# left of shear web 1 -------------------------------------------------------
label = 'left of shear web 1'
# create the bounding polygon
points_lsw1 = points_le[2:-2]
bounding_polygon = Polygon(points_lsw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_1, 'triax', label,
bounding_polygon)
# right of shear web 1 -------------------------------------------------------
label = 'right of shear web 1'
# create the bounding polygon
points_rsw1 = [
points_usc[0],
points_usc[1],
(0.0, 0.0),
points_lsc[-2],
points_lsc[-1]
]
bounding_polygon = Polygon(points_rsw1)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'triax', label,
bounding_polygon)
# left of shear web 2 -------------------------------------------------------
label = 'left of shear web 2'
# create the bounding polygon
points_lsw2 = [
points_usc[3],
points_usc[2],
(0.0, 0.0),
points_lsc[3],
points_lsc[2]
]
bounding_polygon = Polygon(points_lsw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_2, 'triax', label,
bounding_polygon)
# right of shear web 2 -------------------------------------------------------
label = 'right of shear web 2'
# create the bounding polygon
points_rsw2 = [
points_ap1u[-1],
points_ap1u[-2],
(1.5, 0.0),
points_ap1l[-2],
points_ap1l[-1]
]
bounding_polygon = Polygon(points_rsw2)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'triax', label,
bounding_polygon)
# left of shear web 3 -------------------------------------------------------
label = 'left of shear web 3'
# create the bounding polygon
points_lsw3 = [
points_ap1u[2],
points_ap1u[3],
(1.5, 0.0),
points_ap1l[3],
points_ap1l[2]
]
bounding_polygon = Polygon(points_lsw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_3, 'triax', label,
bounding_polygon)
# right of shear web 3 -------------------------------------------------------
label = 'right of shear web 3'
# create the bounding polygon
points_rsw3 = [
points_ap2u[-1],
points_ap2u[-2],
(2.2, 0.15),
points_ap2l[-2],
points_ap2l[-1]
]
bounding_polygon = Polygon(points_rsw3)
pu.plot_polygon(bounding_polygon, 'None', '#000000')
# cut the new layer polygons
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'resin', label,
bounding_polygon)
pu.cut_plot_and_write_alt_layer(st.internal_surface_4, 'triax', label,
bounding_polygon)
# show the plot
plt.show()
# write the TrueGrid input file for mesh generation ---------------------
st.write_truegrid_inputfile(
interrupt_flag=True,
additional_layers=[
st.spar_cap.layer['upper'],
st.spar_cap.layer['lower'],
st.aft_panel_1.layer['upper'],
st.aft_panel_1.layer['lower'],
st.aft_panel_2.layer['upper'],
st.aft_panel_2.layer['lower'],
st.LE_panel.layer['foam'],
st.shear_web_1.layer['biax, left'],
st.shear_web_1.layer['foam'],
st.shear_web_1.layer['biax, right'],
st.shear_web_2.layer['biax, left'],
st.shear_web_2.layer['foam'],
st.shear_web_2.layer['biax, right'],
st.shear_web_3.layer['biax, left'],
st.shear_web_3.layer['foam'],
st.shear_web_3.layer['biax, right']
],
alt_TE_reinforcement=True,
soft_warning=False)
| gpl-3.0 |
xyguo/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 55 | 19053 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
# Sorted accuracies should be equal to original accuracies
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
# Checks whether Returned distances are less than `radius`
# At least one point should be returned when the `radius` is set
# to mean distance from the considering point to other points in
# the database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
# The second point form an angle of 45 degrees to the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
# The third point is orthogonal from the query vector hence at a distance
# exactly one:
assert_almost_equal(dists[2], 1)
# The last point is almost colinear but with opposite sign to the query
# therefore it has a cosine 'distance' very close to the maximum possible
# value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
# If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
# Checks whether inserting array is consistent with fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
# size of _input_array = samples + 1 after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
# size of original_indices_[1] = samples + 1
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
# size of trees_[1] = samples + 1
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
hawk31/pyGPGO | examples/exampleGBM.py | 1 | 1120 | #######################################
# pyGPGO examples
# exampleGBM: tests the Gradient Boosting Machine surrogate.
#######################################
import numpy as np
from pyGPGO.surrogates.BoostedTrees import BoostedTrees
import matplotlib.pyplot as plt
if __name__ == '__main__':
# Build synthetic data (sine function)
x = np.arange(0, 2 * np.pi + 0.01, step=np.pi / 16)
y = np.sin(x)
X = np.array([np.atleast_2d(u) for u in x])[:, 0]
gbm = BoostedTrees(q2=0.84, q1=0.16)
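    # Note: q1/q2 are assumed here to be the 16th/84th percentile targets for the
    # boosted quantile regressors; for a Gaussian those quantiles sit roughly one
    # standard deviation either side of the mean, which is presumably how the
    # surrogate can report a predictive std via return_std below.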
# Fit the model to the data
gbm.fit(X, y)
# Predict on new data
xstar = np.arange(0, 2 * np.pi, step=0.01)
Xstar = np.array([np.atleast_2d(u) for u in xstar])[:, 0]
ymean, ystd = gbm.predict(Xstar, return_std=True)
# Confidence interval bounds
lower, upper = ymean - 1.96 * ystd, ymean + 1.96 * ystd
# Plot values
plt.figure()
plt.plot(xstar, ymean, label='Posterior mean')
plt.plot(xstar, np.sin(xstar), label='True function')
plt.fill_between(xstar, lower, upper, alpha=0.4, label=r'95% confidence band')
plt.grid()
plt.legend(loc=0)
plt.show() | mit |
automl/paramsklearn | ParamSklearn/components/data_preprocessing/balancing.py | 1 | 4086 | import numpy as np
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter
from ParamSklearn.components.base import \
ParamSklearnPreprocessingAlgorithm
from ParamSklearn.constants import *
class Balancing(ParamSklearnPreprocessingAlgorithm):
def __init__(self, strategy, random_state=None):
self.strategy = strategy
def fit(self, X, y=None):
return self
def transform(self, X):
return X
def get_weights(self, Y, classifier, preprocessor, init_params, fit_params):
if init_params is None:
init_params = {}
if fit_params is None:
fit_params = {}
# Classifiers which require sample weights:
# We can have adaboost in here, because in the fit method,
# the sample weights are normalized:
# https://github.com/scikit-learn/scikit-learn/blob/0.15.X/sklearn/ensemble/weight_boosting.py#L121
clf_ = ['adaboost', 'gradient_boosting']
pre_ = []
if classifier in clf_ or preprocessor in pre_:
if len(Y.shape) > 1:
offsets = [2 ** i for i in range(Y.shape[1])]
Y_ = np.sum(Y * offsets, axis=1)
else:
Y_ = Y
unique, counts = np.unique(Y_, return_counts=True)
cw = 1. / counts
cw = cw / np.mean(cw)
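            # Illustration: with counts of [90, 10] the inverse frequencies are
            # [1/90, 1/10]; after dividing by their mean they become [0.2, 1.8],
            # so each class contributes the same total weight (90*0.2 == 10*1.8).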
sample_weights = np.ones(Y_.shape)
for i, ue in enumerate(unique):
mask = Y_ == ue
sample_weights[mask] *= cw[i]
if classifier in clf_:
fit_params['classifier:sample_weight'] = sample_weights
if preprocessor in pre_:
fit_params['preprocessor:sample_weight'] = sample_weights
# Classifiers which can adjust sample weights themselves via the
# argument `class_weight`
clf_ = ['decision_tree', 'extra_trees', 'liblinear_svc',
'libsvm_svc', 'random_forest', 'sgd']
pre_ = ['liblinear_svc_preprocessor',
'extra_trees_preproc_for_classification']
if classifier in clf_:
init_params['classifier:class_weight'] = 'auto'
if preprocessor in pre_:
init_params['preprocessor:class_weight'] = 'auto'
clf_ = ['ridge']
if classifier in clf_:
class_weights = {}
unique, counts = np.unique(Y, return_counts=True)
cw = 1. / counts
cw = cw / np.mean(cw)
for i, ue in enumerate(unique):
class_weights[ue] = cw[i]
if classifier in clf_:
init_params['classifier:class_weight'] = class_weights
return init_params, fit_params
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'Balancing',
'name': 'Balancing Imbalanced Class Distributions',
'handles_missing_values': True,
'handles_nominal_values': True,
'handles_numerical_features': True,
'prefers_data_scaled': False,
'prefers_data_normalized': False,
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'is_deterministic': True,
'handles_sparse': True,
'handles_dense': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA, SIGNED_DATA),
'output': (INPUT,),
'preferred_dtype': None}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
# TODO add replace by zero!
strategy = CategoricalHyperparameter(
"strategy", ["none", "weighting"], default="none")
cs = ConfigurationSpace()
cs.add_hyperparameter(strategy)
return cs
def __str__(self):
name = self.get_properties()['name']
return "ParamSklearn %s" % name
| bsd-3-clause |
yhpeng-git/mxnet | example/gan/dcgan.py | 14 | 9972 | from __future__ import print_function
import mxnet as mx
import numpy as np
from sklearn.datasets import fetch_mldata
from matplotlib import pyplot as plt
import logging
import cv2
from datetime import datetime
def make_dcgan_sym(ngf, ndf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12):
BatchNorm = mx.sym.BatchNorm
rand = mx.sym.Variable('rand')
g1 = mx.sym.Deconvolution(rand, name='g1', kernel=(4,4), num_filter=ngf*8, no_bias=no_bias)
gbn1 = BatchNorm(g1, name='gbn1', fix_gamma=fix_gamma, eps=eps)
gact1 = mx.sym.Activation(gbn1, name='gact1', act_type='relu')
g2 = mx.sym.Deconvolution(gact1, name='g2', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ngf*4, no_bias=no_bias)
gbn2 = BatchNorm(g2, name='gbn2', fix_gamma=fix_gamma, eps=eps)
gact2 = mx.sym.Activation(gbn2, name='gact2', act_type='relu')
g3 = mx.sym.Deconvolution(gact2, name='g3', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ngf*2, no_bias=no_bias)
gbn3 = BatchNorm(g3, name='gbn3', fix_gamma=fix_gamma, eps=eps)
gact3 = mx.sym.Activation(gbn3, name='gact3', act_type='relu')
g4 = mx.sym.Deconvolution(gact3, name='g4', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ngf, no_bias=no_bias)
gbn4 = BatchNorm(g4, name='gbn4', fix_gamma=fix_gamma, eps=eps)
gact4 = mx.sym.Activation(gbn4, name='gact4', act_type='relu')
g5 = mx.sym.Deconvolution(gact4, name='g5', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=nc, no_bias=no_bias)
gout = mx.sym.Activation(g5, name='gact5', act_type='tanh')
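    # Generator summary: the (batch, Z, 1, 1) noise is first upsampled by a 4x4
    # transposed convolution to a 4x4 map with ngf*8 channels, then four stride-2
    # transposed convolutions double the spatial size to 8, 16, 32 and finally
    # 64x64 with nc channels; tanh keeps the output in [-1, 1].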
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
d1 = mx.sym.Convolution(data, name='d1', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ndf, no_bias=no_bias)
dact1 = mx.sym.LeakyReLU(d1, name='dact1', act_type='leaky', slope=0.2)
d2 = mx.sym.Convolution(dact1, name='d2', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ndf*2, no_bias=no_bias)
dbn2 = BatchNorm(d2, name='dbn2', fix_gamma=fix_gamma, eps=eps)
dact2 = mx.sym.LeakyReLU(dbn2, name='dact2', act_type='leaky', slope=0.2)
d3 = mx.sym.Convolution(dact2, name='d3', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ndf*4, no_bias=no_bias)
dbn3 = BatchNorm(d3, name='dbn3', fix_gamma=fix_gamma, eps=eps)
dact3 = mx.sym.LeakyReLU(dbn3, name='dact3', act_type='leaky', slope=0.2)
d4 = mx.sym.Convolution(dact3, name='d4', kernel=(4,4), stride=(2,2), pad=(1,1), num_filter=ndf*8, no_bias=no_bias)
dbn4 = BatchNorm(d4, name='dbn4', fix_gamma=fix_gamma, eps=eps)
dact4 = mx.sym.LeakyReLU(dbn4, name='dact4', act_type='leaky', slope=0.2)
d5 = mx.sym.Convolution(dact4, name='d5', kernel=(4,4), num_filter=1, no_bias=no_bias)
d5 = mx.sym.Flatten(d5)
dloss = mx.sym.LogisticRegressionOutput(data=d5, label=label, name='dloss')
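    # Discriminator summary: four stride-2 4x4 convolutions reduce the 64x64 input
    # to a 4x4 map, a final 4x4 convolution collapses it to a single value per
    # sample, and LogisticRegressionOutput turns that logit into a real/fake
    # probability trained against `label`.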
return gout, dloss
def get_mnist():
mnist = fetch_mldata('MNIST original')
np.random.seed(1234) # set seed for deterministic ordering
p = np.random.permutation(mnist.data.shape[0])
X = mnist.data[p]
X = X.reshape((70000, 28, 28))
X = np.asarray([cv2.resize(x, (64,64)) for x in X])
X = X.astype(np.float32)/(255.0/2) - 1.0
X = X.reshape((70000, 1, 64, 64))
X = np.tile(X, (1, 3, 1, 1))
X_train = X[:60000]
X_test = X[60000:]
return X_train, X_test
class RandIter(mx.io.DataIter):
def __init__(self, batch_size, ndim):
self.batch_size = batch_size
self.ndim = ndim
self.provide_data = [('rand', (batch_size, ndim, 1, 1))]
self.provide_label = []
def iter_next(self):
return True
def getdata(self):
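        # Each call draws a fresh batch of standard-normal noise of shape
        # (batch_size, ndim, 1, 1), matching the 'rand' input advertised in
        # provide_data above.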
return [mx.random.normal(0, 1.0, shape=(self.batch_size, self.ndim, 1, 1))]
class ImagenetIter(mx.io.DataIter):
def __init__(self, path, batch_size, data_shape):
self.internal = mx.io.ImageRecordIter(
path_imgrec = path,
data_shape = data_shape,
batch_size = batch_size,
rand_crop = True,
rand_mirror = True,
max_crop_size = 256,
min_crop_size = 192)
self.provide_data = [('data', (batch_size,) + data_shape)]
self.provide_label = []
def reset(self):
self.internal.reset()
def iter_next(self):
return self.internal.iter_next()
def getdata(self):
data = self.internal.getdata()
data = data * (2.0/255.0)
data -= 1
return [data]
def fill_buf(buf, i, img, shape):
n = buf.shape[0]/shape[1]
m = buf.shape[1]/shape[0]
sx = (i%m)*shape[0]
sy = (i/m)*shape[1]
buf[sy:sy+shape[1], sx:sx+shape[0], :] = img
def visual(title, X):
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
X = np.clip((X+1.0)*(255.0/2.0), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
buff = cv2.cvtColor(buff, cv2.COLOR_BGR2RGB)
plt.imshow(buff)
plt.title(title)
plt.show()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
# =============setting============
dataset = 'mnist'
imgnet_path = './train.rec'
ndf = 64
ngf = 64
nc = 3
batch_size = 64
Z = 100
lr = 0.0002
beta1 = 0.5
ctx = mx.gpu(0)
check_point = False
symG, symD = make_dcgan_sym(ngf, ndf, nc)
#mx.viz.plot_network(symG, shape={'rand': (batch_size, 100, 1, 1)}).view()
#mx.viz.plot_network(symD, shape={'data': (batch_size, nc, 64, 64)}).view()
# ==============data==============
if dataset == 'mnist':
X_train, X_test = get_mnist()
train_iter = mx.io.NDArrayIter(X_train, batch_size=batch_size)
elif dataset == 'imagenet':
train_iter = ImagenetIter(imgnet_path, batch_size, (3, 64, 64))
rand_iter = RandIter(batch_size, Z)
label = mx.nd.zeros((batch_size,), ctx=ctx)
# =============module G=============
modG = mx.mod.Module(symbol=symG, data_names=('rand',), label_names=None, context=ctx)
modG.bind(data_shapes=rand_iter.provide_data)
modG.init_params(initializer=mx.init.Normal(0.02))
modG.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 0.,
'beta1': beta1,
})
mods = [modG]
# =============module D=============
modD = mx.mod.Module(symbol=symD, data_names=('data',), label_names=('label',), context=ctx)
modD.bind(data_shapes=train_iter.provide_data,
label_shapes=[('label', (batch_size,))],
inputs_need_grad=True)
modD.init_params(initializer=mx.init.Normal(0.02))
modD.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 0.,
'beta1': beta1,
})
mods.append(modD)
# ============printing==============
def norm_stat(d):
return mx.nd.norm(d)/np.sqrt(d.size)
mon = mx.mon.Monitor(10, norm_stat, pattern=".*output|d1_backward_data", sort=True)
mon = None
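    # The monitor above would print the scaled L2 norm of matching arrays every
    # 10 batches; overwriting it with None disables that logging.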
if mon is not None:
for mod in mods:
pass
def facc(label, pred):
pred = pred.ravel()
label = label.ravel()
return ((pred > 0.5) == label).mean()
def fentropy(label, pred):
pred = pred.ravel()
label = label.ravel()
return -(label*np.log(pred+1e-12) + (1.-label)*np.log(1.-pred+1e-12)).mean()
mG = mx.metric.CustomMetric(fentropy)
mD = mx.metric.CustomMetric(fentropy)
mACC = mx.metric.CustomMetric(facc)
print('Training...')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
# =============train===============
for epoch in range(100):
train_iter.reset()
for t, batch in enumerate(train_iter):
rbatch = rand_iter.next()
if mon is not None:
mon.tic()
modG.forward(rbatch, is_train=True)
outG = modG.get_outputs()
# update discriminator on fake
label[:] = 0
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
#modD.update()
gradD = [[grad.copyto(grad.context) for grad in grads] for grads in modD._exec_group.grad_arrays]
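            # The discriminator update is deferred: the gradients from this fake
            # batch are copied here and added to the gradients of the real batch
            # below, so modD.update() takes a single step on the combined gradient.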
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
# update discriminator on real
label[:] = 1
batch.label = [label]
modD.forward(batch, is_train=True)
modD.backward()
for gradsr, gradsf in zip(modD._exec_group.grad_arrays, gradD):
for gradr, gradf in zip(gradsr, gradsf):
gradr += gradf
modD.update()
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
# update generator
label[:] = 1
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
diffD = modD.get_input_grads()
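            # The generator step reuses the discriminator pass on the fake batch
            # with label 1: the gradients w.r.t. the discriminator's input are
            # backpropagated through G (the usual "maximize log D(G(z))" trick).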
modG.backward(diffD)
modG.update()
mG.update([label], modD.get_outputs())
if mon is not None:
mon.toc_print()
t += 1
if t % 10 == 0:
print('epoch:', epoch, 'iter:', t, 'metric:', mACC.get(), mG.get(), mD.get())
mACC.reset()
mG.reset()
mD.reset()
visual('gout', outG[0].asnumpy())
diff = diffD[0].asnumpy()
diff = (diff - diff.mean())/diff.std()
visual('diff', diff)
visual('data', batch.data[0].asnumpy())
if check_point:
print('Saving...')
modG.save_params('%s_G_%s-%04d.params'%(dataset, stamp, epoch))
modD.save_params('%s_D_%s-%04d.params'%(dataset, stamp, epoch))
| apache-2.0 |
cpcloud/blaze | blaze/compute/tests/test_numpy_compute.py | 2 | 19736 | from __future__ import absolute_import, division, print_function
import pytest
import itertools
import numpy as np
import pandas as pd
from datetime import datetime, date
from blaze.compute.core import compute, compute_up
from blaze.expr import symbol, by, exp, summary, Broadcast, join, concat
from blaze.expr import greatest, least
from blaze import sin
import blaze
from odo import into
from datashape import discover, to_numpy, dshape
x = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', 'i8'), ('name', 'S7'), ('amount', 'i8')])
t = symbol('t', discover(x))
def eq(a, b):
c = a == b
if isinstance(c, np.ndarray):
return c.all()
return c
def test_symbol():
assert eq(compute(t, x), x)
def test_eq():
assert eq(compute(t['amount'] == 100, x),
x['amount'] == 100)
def test_selection():
    assert eq(compute(t[t['amount'] == 100], x), x[x['amount'] == 100])
assert eq(compute(t[t['amount'] < 0], x), x[x['amount'] < 0])
def test_arithmetic():
assert eq(compute(t['amount'] + t['id'], x),
x['amount'] + x['id'])
assert eq(compute(t['amount'] * t['id'], x),
x['amount'] * x['id'])
assert eq(compute(t['amount'] % t['id'], x),
x['amount'] % x['id'])
def test_UnaryOp():
assert eq(compute(exp(t['amount']), x),
np.exp(x['amount']))
assert eq(compute(abs(-t['amount']), x),
abs(-x['amount']))
def test_Neg():
assert eq(compute(-t['amount'], x),
-x['amount'])
def test_invert_not():
assert eq(compute(~(t.amount > 0), x),
~(x['amount'] > 0))
def test_Reductions():
assert compute(t['amount'].mean(), x) == x['amount'].mean()
assert compute(t['amount'].count(), x) == len(x['amount'])
assert compute(t['amount'].sum(), x) == x['amount'].sum()
assert compute(t['amount'].min(), x) == x['amount'].min()
assert compute(t['amount'].max(), x) == x['amount'].max()
assert compute(t['amount'].nunique(), x) == len(np.unique(x['amount']))
assert compute(t['amount'].var(), x) == x['amount'].var()
assert compute(t['amount'].std(), x) == x['amount'].std()
assert compute(t['amount'].var(unbiased=True), x) == x['amount'].var(ddof=1)
assert compute(t['amount'].std(unbiased=True), x) == x['amount'].std(ddof=1)
assert compute((t['amount'] > 150).any(), x) == True
assert compute((t['amount'] > 250).all(), x) == False
assert compute(t['amount'][0], x) == x['amount'][0]
assert compute(t['amount'][-1], x) == x['amount'][-1]
def test_count_string():
s = symbol('name', 'var * ?string')
x = np.array(['Alice', np.nan, 'Bob', 'Denis', 'Edith'], dtype='object')
assert compute(s.count(), x) == 4
def test_reductions_on_recarray():
assert compute(t.count(), x) == len(x)
def test_count_nan():
t = symbol('t', '3 * ?real')
x = np.array([1.0, np.nan, 2.0])
assert compute(t.count(), x) == 2
def test_distinct():
x = np.array([('Alice', 100),
('Alice', -200),
('Bob', 100),
('Bob', 100)],
dtype=[('name', 'S5'), ('amount', 'i8')])
t = symbol('t', 'var * {name: string, amount: int64}')
assert eq(compute(t['name'].distinct(), x),
np.unique(x['name']))
assert eq(compute(t.distinct(), x),
np.unique(x))
def test_distinct_on_recarray():
rec = pd.DataFrame(
[[0, 1],
[0, 2],
[1, 1],
[1, 2]],
columns=('a', 'b'),
).to_records(index=False)
s = symbol('s', discover(rec))
assert (
compute(s.distinct('a'), rec) ==
pd.DataFrame(
[[0, 1],
[1, 1]],
columns=('a', 'b'),
).to_records(index=False)
).all()
def test_distinct_on_structured_array():
arr = np.array(
[(0., 1.),
(0., 2.),
(1., 1.),
(1., 2.)],
dtype=[('a', 'f4'), ('b', 'f4')],
)
s = symbol('s', discover(arr))
assert(
compute(s.distinct('a'), arr) ==
np.array([(0., 1.), (1., 1.)], dtype=arr.dtype)
).all()
def test_distinct_on_str():
rec = pd.DataFrame(
[['a', 'a'],
['a', 'b'],
['b', 'a'],
['b', 'b']],
columns=('a', 'b'),
).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
s = symbol('s', discover(rec))
assert (
compute(s.distinct('a'), rec) ==
pd.DataFrame(
[['a', 'a'],
['b', 'a']],
columns=('a', 'b'),
).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')])
).all()
def test_sort():
assert eq(compute(t.sort('amount'), x),
np.sort(x, order='amount'))
assert eq(compute(t.sort('amount', ascending=False), x),
np.sort(x, order='amount')[::-1])
assert eq(compute(t.sort(['amount', 'id']), x),
np.sort(x, order=['amount', 'id']))
assert eq(compute(t.amount.sort(), x),
np.sort(x['amount']))
def test_head():
assert eq(compute(t.head(2), x),
x[:2])
def test_tail():
assert eq(compute(t.tail(2), x),
x[-2:])
def test_label():
expected = x['amount'] * 10
expected = np.array(expected, dtype=[('foo', 'i8')])
assert eq(compute((t['amount'] * 10).label('foo'), x),
expected)
def test_relabel():
expected = np.array(x, dtype=[('ID', 'i8'), ('NAME', 'S7'), ('amount', 'i8')])
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), x)
assert result.dtype.names == expected.dtype.names
assert eq(result, expected)
def test_by():
expr = by(t.amount > 0, count=t.id.count())
result = compute(expr, x)
assert set(map(tuple, into(list, result))) == set([(False, 2), (True, 3)])
def test_compute_up_field():
assert eq(compute(t['name'], x), x['name'])
def test_compute_up_projection():
assert eq(compute_up(t[['name', 'amount']], x), x[['name', 'amount']])
ax = np.arange(30, dtype='f4').reshape((5, 3, 2))
a = symbol('a', discover(ax))
def test_slice():
inds = [0, slice(2), slice(1, 3), slice(None, None, 2), [1, 2, 3],
(0, 1), (0, slice(1, 3)), (slice(0, 3), slice(3, 1, -1)),
(0, [1, 2])]
for s in inds:
assert (compute(a[s], ax) == ax[s]).all()
def test_array_reductions():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis), ax), ax.sum(axis=axis))
assert eq(compute(a.std(axis=axis), ax), ax.std(axis=axis))
def test_array_reductions_with_keepdims():
for axis in [None, 0, 1, (0, 1), (2, 1)]:
assert eq(compute(a.sum(axis=axis, keepdims=True), ax),
ax.sum(axis=axis, keepdims=True))
def test_summary_on_ndarray():
assert compute(summary(total=a.sum(), min=a.min()), ax) == \
(ax.min(), ax.sum())
result = compute(summary(total=a.sum(), min=a.min(), keepdims=True), ax)
expected = np.array([(ax.min(), ax.sum())],
dtype=[('min', 'float32'), ('total', 'float64')])
assert result.ndim == ax.ndim
assert eq(expected, result)
def test_summary_on_ndarray_with_axis():
for axis in [0, 1, (1, 0)]:
expr = summary(total=a.sum(), min=a.min(), axis=axis)
result = compute(expr, ax)
shape, dtype = to_numpy(expr.dshape)
expected = np.empty(shape=shape, dtype=dtype)
expected['total'] = ax.sum(axis=axis)
expected['min'] = ax.min(axis=axis)
assert eq(result, expected)
def test_utcfromtimestamp():
t = symbol('t', '1 * int64')
data = np.array([0, 1])
expected = np.array(['1970-01-01T00:00:00Z', '1970-01-01T00:00:01Z'],
dtype='M8[us]')
assert eq(compute(t.utcfromtimestamp, data), expected)
def test_nelements_structured_array():
assert compute(t.nelements(), x) == len(x)
assert compute(t.nelements(keepdims=True), x) == (len(x),)
def test_nelements_array():
t = symbol('t', '5 * 4 * 3 * float64')
x = np.random.randn(*t.shape)
result = compute(t.nelements(axis=(0, 1)), x)
np.testing.assert_array_equal(result, np.array([20, 20, 20]))
result = compute(t.nelements(axis=1), x)
np.testing.assert_array_equal(result, 4 * np.ones((5, 3)))
def test_nrows():
assert compute(t.nrows, x) == len(x)
dts = np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:05Z'],
dtype='M8[us]')
s = symbol('s', 'var * datetime')
def test_datetime_truncation():
assert eq(compute(s.truncate(1, 'day'), dts),
dts.astype('M8[D]'))
assert eq(compute(s.truncate(2, 'seconds'), dts),
np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:04Z'],
dtype='M8[s]'))
assert eq(compute(s.truncate(2, 'weeks'), dts),
np.array(['2000-06-18', '2000-06-18'], dtype='M8[D]'))
assert into(list, compute(s.truncate(1, 'week'), dts))[0].isoweekday() == 7
def test_hour():
dts = [datetime(2000, 6, 20, 1, 00, 00),
datetime(2000, 6, 20, 12, 59, 59),
datetime(2000, 6, 20, 12, 00, 00),
datetime(2000, 6, 20, 11, 59, 59)]
dts = into(np.ndarray, dts)
assert eq(compute(s.truncate(1, 'hour'), dts),
into(np.ndarray, [datetime(2000, 6, 20, 1, 0),
datetime(2000, 6, 20, 12, 0),
datetime(2000, 6, 20, 12, 0),
datetime(2000, 6, 20, 11, 0)]))
def test_month():
dts = [datetime(2000, 7, 1),
datetime(2000, 6, 30),
datetime(2000, 6, 1),
datetime(2000, 5, 31)]
dts = into(np.ndarray, dts)
assert eq(compute(s.truncate(1, 'month'), dts),
into(np.ndarray, [date(2000, 7, 1),
date(2000, 6, 1),
date(2000, 6, 1),
date(2000, 5, 1)]))
def test_truncate_on_np_datetime64_scalar():
s = symbol('s', 'datetime')
data = np.datetime64('2000-01-02T12:30:00Z')
assert compute(s.truncate(1, 'day'), data) == data.astype('M8[D]')
def test_numpy_and_python_datetime_truncate_agree_on_start_of_week():
s = symbol('s', 'datetime')
n = np.datetime64('2014-11-11')
p = datetime(2014, 11, 11)
expr = s.truncate(1, 'week')
assert compute(expr, n) == compute(expr, p)
def test_add_multiple_ndarrays():
a = symbol('a', '5 * 4 * int64')
b = symbol('b', '5 * 4 * float32')
x = np.arange(9, dtype='int64').reshape(3, 3)
y = (x + 1).astype('float32')
expr = sin(a) + 2 * b
scope = {a: x, b: y}
expected = sin(x) + 2 * y
# check that we cast correctly
assert expr.dshape == dshape('5 * 4 * float64')
np.testing.assert_array_equal(compute(expr, scope), expected)
np.testing.assert_array_equal(compute(expr, scope, optimize=False),
expected)
nA = np.arange(30, dtype='f4').reshape((5, 6))
ny = np.arange(6, dtype='f4')
A = symbol('A', discover(nA))
y = symbol('y', discover(ny))
def test_transpose():
assert eq(compute(A.T, nA), nA.T)
assert eq(compute(A.transpose((0, 1)), nA), nA)
def test_dot():
assert eq(compute(y.dot(y), {y: ny}), np.dot(ny, ny))
assert eq(compute(A.dot(y), {A: nA, y: ny}), np.dot(nA, ny))
def test_subexpr_datetime():
data = pd.date_range(start='01/01/2010', end='01/04/2010', freq='D').values
s = symbol('s', discover(data))
result = compute(s.truncate(days=2).day, data)
expected = np.array([31, 2, 2, 4])
np.testing.assert_array_equal(result, expected)
def test_mixed_types():
x = np.array([[(4, 180), (4, 184), (4, 188), (4, 192), (4, 196)],
[(4, 660), (4, 664), (4, 668), (4, 672), (4, 676)],
[(4, 1140), (4, 1144), (4, 1148), (4, 1152), (4, 1156)],
[(4, 1620), (4, 1624), (4, 1628), (4, 1632), (4, 1636)],
[(4, 2100), (4, 2104), (4, 2108), (4, 2112), (4, 2116)]],
dtype=[('count', '<i4'), ('total', '<i8')])
aggregate = symbol('aggregate', discover(x))
result = compute(aggregate.total.sum(axis=(0,)) /
aggregate['count'].sum(axis=(0,)), x)
expected = (x['total'].sum(axis=0, keepdims=True) /
x['count'].sum(axis=0, keepdims=True)).squeeze()
np.testing.assert_array_equal(result, expected)
def test_broadcast_compute_against_numbers_and_arrays():
A = symbol('A', '5 * float32')
a = symbol('a', 'float32')
b = symbol('b', 'float32')
x = np.arange(5, dtype='f4')
expr = Broadcast((A, b), (a, b), a + b)
result = compute(expr, {A: x, b: 10})
assert eq(result, x + 10)
def test_map():
pytest.importorskip('numba')
a = np.arange(10.0)
f = lambda x: np.sin(x) + 1.03 * np.cos(x) ** 2
x = symbol('x', discover(a))
expr = x.map(f, 'float64')
result = compute(expr, a)
expected = f(a)
# make sure we're not going to pandas here
assert type(result) == np.ndarray
assert type(result) == type(expected)
np.testing.assert_array_equal(result, expected)
def test_vector_norm():
x = np.arange(30).reshape((5, 6))
s = symbol('x', discover(x))
assert eq(compute(s.vnorm(), x),
np.linalg.norm(x))
assert eq(compute(s.vnorm(ord=1), x),
np.linalg.norm(x.flatten(), ord=1))
assert eq(compute(s.vnorm(ord=4, axis=0), x),
np.linalg.norm(x, ord=4, axis=0))
expr = s.vnorm(ord=4, axis=0, keepdims=True)
assert expr.shape == compute(expr, x).shape
def test_join():
cities = np.array([('Alice', 'NYC'),
('Alice', 'LA'),
('Bob', 'Chicago')],
dtype=[('name', 'S7'), ('city', 'O')])
c = symbol('cities', discover(cities))
expr = join(t, c, 'name')
result = compute(expr, {t: x, c: cities})
assert (b'Alice', 1, 100, 'LA') in into(list, result)
def test_query_with_strings():
b = np.array([('a', 1), ('b', 2), ('c', 3)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
assert compute(s[s.x == b'b'], b).tolist() == [(b'b', 2)]
@pytest.mark.parametrize('keys', [['a'], list('bc')])
def test_isin(keys):
b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
result = compute(s.x.isin(keys), b)
expected = np.in1d(b['x'], keys)
np.testing.assert_array_equal(result, expected)
def test_nunique_recarray():
b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6),
('a', 1), ('b', 2)],
dtype=[('x', 'S1'), ('y', 'i4')])
s = symbol('s', discover(b))
expr = s.nunique()
assert compute(expr, b) == len(np.unique(b))
def test_str_repeat():
a = np.array(('a', 'b', 'c'))
s = symbol('s', discover(a))
expr = s.repeat(3)
assert all(compute(expr, a) == np.char.multiply(a, 3))
def test_str_interp():
a = np.array(('%s', '%s', '%s'))
s = symbol('s', discover(a))
expr = s.interp(1)
assert all(compute(expr, a) == np.char.mod(a, 1))
def test_timedelta_arith():
dates = np.arange('2014-01-01', '2014-02-01', dtype='datetime64')
delta = np.timedelta64(1, 'D')
sym = symbol('s', discover(dates))
assert (compute(sym + delta, dates) == dates + delta).all()
assert (compute(sym - delta, dates) == dates - delta).all()
def test_coerce():
x = np.arange(1, 3)
s = symbol('s', discover(x))
np.testing.assert_array_equal(compute(s.coerce('float64'), x),
np.arange(1.0, 3.0))
def test_concat_arr():
s_data = np.arange(15)
t_data = np.arange(15, 30)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) ==
np.arange(30)
).all()
def test_concat_mat():
s_data = np.arange(15).reshape(5, 3)
t_data = np.arange(15, 30).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
assert (
compute(concat(s, t), {s: s_data, t: t_data}) ==
np.arange(30).reshape(10, 3)
).all()
assert (
compute(concat(s, t, axis=1), {s: s_data, t: t_data}) ==
np.concatenate((s_data, t_data), axis=1)
).all()
@pytest.mark.parametrize('dtype', ['int64', 'float64'])
def test_least(dtype):
s_data = np.arange(15, dtype=dtype).reshape(5, 3)
t_data = np.arange(15, 30, dtype=dtype).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
expr = least(s, t)
result = compute(expr, {s: s_data, t: t_data})
expected = np.minimum(s_data, t_data)
assert np.all(result == expected)
@pytest.mark.parametrize('dtype', ['int64', 'float64'])
def test_least_mixed(dtype):
s_data = np.array([2, 1], dtype=dtype)
t_data = np.array([1, 2], dtype=dtype)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
expr = least(s, t)
result = compute(expr, {s: s_data, t: t_data})
expected = np.minimum(s_data, t_data)
assert np.all(result == expected)
@pytest.mark.parametrize('dtype', ['int64', 'float64'])
def test_greatest(dtype):
s_data = np.arange(15, dtype=dtype).reshape(5, 3)
t_data = np.arange(15, 30, dtype=dtype).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
expr = greatest(s, t)
result = compute(expr, {s: s_data, t: t_data})
expected = np.maximum(s_data, t_data)
assert np.all(result == expected)
@pytest.mark.parametrize('dtype', ['int64', 'float64'])
def test_greatest_mixed(dtype):
s_data = np.array([2, 1], dtype=dtype)
t_data = np.array([1, 2], dtype=dtype)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
expr = greatest(s, t)
result = compute(expr, {s: s_data, t: t_data})
expected = np.maximum(s_data, t_data)
assert np.all(result == expected)
binary_name_map = {
'atan2': 'arctan2'
}
@pytest.mark.parametrize(
['func', 'kwargs'],
itertools.product(['copysign', 'ldexp'], [dict(optimize=False), dict()])
)
def test_binary_math(func, kwargs):
s_data = np.arange(15).reshape(5, 3)
t_data = np.arange(15, 30).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
scope = {s: s_data, t: t_data}
result = compute(getattr(blaze, func)(s, t), scope, **kwargs)
expected = getattr(np, binary_name_map.get(func, func))(s_data, t_data)
np.testing.assert_equal(result, expected)
@pytest.mark.parametrize(
['func', 'kwargs'],
itertools.product(['atan2', 'hypot'], [dict(optimize=False), dict()])
)
def test_floating_binary_math(func, kwargs):
s_data = np.arange(15).reshape(5, 3)
t_data = np.arange(15, 30).reshape(5, 3)
s = symbol('s', discover(s_data))
t = symbol('t', discover(t_data))
scope = {s: s_data, t: t_data}
result = compute(getattr(blaze, func)(s, t), scope, **kwargs)
expected = getattr(np, binary_name_map.get(func, func))(s_data, t_data)
np.testing.assert_allclose(result, expected)
def test_selection_inner_inputs():
s_data = np.arange(5).reshape(5, 1)
t_data = np.arange(5).reshape(5, 1)
s = symbol('s', 'var * {a: int64}')
t = symbol('t', 'var * {a: int64}')
assert (
compute(s[s.a == t.a], {s: s_data, t: t_data}) ==
s_data
).all()
| bsd-3-clause |
Windy-Ground/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm is also applied; unlike the covariance-based
estimators it does not assume any parametric form for the data distribution,
as discussed in the second example below.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
elkingtonmcb/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
The :ref:`decision trees <tree>` is
used to fit a sine curve with addition noisy observation. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
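# max_depth=2 gives a coarse, smooth fit, while max_depth=5 is deep enough to
# chase the noise injected into every 5th sample, illustrating the overfitting
# described in the docstring.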
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
AnasGhrab/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 35 | 16763 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(LinearRegression(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(LinearRegression())
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
        dense_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
        for sparse_res, dense_res in zip(sparse_results, dense_results):
            assert_array_equal(sparse_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
| bsd-3-clause |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/core/dtypes/api.py | 16 | 2399 | # flake8: noqa
import sys
from .common import (pandas_dtype,
is_dtype_equal,
is_extension_type,
# categorical
is_categorical,
is_categorical_dtype,
# interval
is_interval,
is_interval_dtype,
# datetimelike
is_datetimetz,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime64_any_dtype,
is_datetime64_ns_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_period,
is_period_dtype,
# string-like
is_string_dtype,
is_object_dtype,
# sparse
is_sparse,
# numeric types
is_scalar,
is_sparse,
is_bool,
is_integer,
is_float,
is_complex,
is_number,
is_integer_dtype,
is_int64_dtype,
is_numeric_dtype,
is_float_dtype,
is_bool_dtype,
is_complex_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
# like
is_re,
is_re_compilable,
is_dict_like,
is_iterator,
is_file_like,
is_list_like,
is_hashable,
is_named_tuple)
# deprecated
m = sys.modules['pandas.core.dtypes.api']
for t in ['is_any_int_dtype', 'is_floating_dtype', 'is_sequence']:
def outer(t=t):
def wrapper(arr_or_dtype):
import warnings
import pandas
warnings.warn("{t} is deprecated and will be "
"removed in a future version".format(t=t),
FutureWarning, stacklevel=3)
return getattr(pandas.core.dtypes.common, t)(arr_or_dtype)
return wrapper
setattr(m, t, outer(t))
del sys, m, t, outer
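# Illustrative sketch of how the deprecated aliases built above behave; the call
# below is an assumed typical use, not part of this module:
#
#     import warnings
#     from pandas.core.dtypes import api
#     with warnings.catch_warnings(record=True):
#         warnings.simplefilter("always")
#         api.is_any_int_dtype("int64")  # emits a FutureWarning, then delegates
#                                        # to pandas.core.dtypes.common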
| mit |
materialsproject/pymatgen | pymatgen/phonon/tests/test_plotter.py | 5 | 3805 | import json
import os
import unittest
from io import open
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.phonon.dos import CompletePhononDos
from pymatgen.phonon.plotter import PhononBSPlotter, PhononDosPlotter, ThermoPlotter
from pymatgen.util.testing import PymatgenTest
class PhononDosPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "NaCl_complete_ph_dos.json"), "r") as f:
self.dos = CompletePhononDos.from_dict(json.load(f))
self.plotter = PhononDosPlotter(sigma=0.2, stack=True)
self.plotter_nostack = PhononDosPlotter(sigma=0.2, stack=False)
def test_add_dos_dict(self):
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 0)
self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
self.assertEqual(len(d), 2)
def test_get_dos_dict(self):
self.plotter.add_dos_dict(self.dos.get_element_dos(), key_sort_func=lambda x: x.X)
d = self.plotter.get_dos_dict()
for el in ["Na", "Cl"]:
self.assertIn(el, d)
def test_plot(self):
# Disabling latex for testing.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.add_dos("Total", self.dos)
self.plotter.get_plot(units="mev")
self.plotter_nostack.add_dos("Total", self.dos)
self.plotter_nostack.get_plot(units="mev")
class PhononBSPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "NaCl_phonon_bandstructure.json"), "r") as f:
d = json.loads(f.read())
self.bs = PhononBandStructureSymmLine.from_dict(d)
self.plotter = PhononBSPlotter(self.bs)
def test_bs_plot_data(self):
self.assertEqual(
len(self.plotter.bs_plot_data()["distances"][0]),
51,
"wrong number of distances in the first branch",
)
self.assertEqual(len(self.plotter.bs_plot_data()["distances"]), 4, "wrong number of branches")
self.assertEqual(
sum([len(e) for e in self.plotter.bs_plot_data()["distances"]]),
204,
"wrong number of distances",
)
self.assertEqual(self.plotter.bs_plot_data()["ticks"]["label"][4], "Y", "wrong tick label")
self.assertEqual(
len(self.plotter.bs_plot_data()["ticks"]["label"]),
8,
"wrong number of tick labels",
)
def test_plot(self):
# Disabling latex for testing.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.get_plot(units="mev")
def test_plot_compare(self):
# Disabling latex for testing.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.plot_compare(self.plotter, units="mev")
class ThermoPlotterTest(unittest.TestCase):
def setUp(self):
with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "NaCl_complete_ph_dos.json"), "r") as f:
self.dos = CompletePhononDos.from_dict(json.load(f))
self.plotter = ThermoPlotter(self.dos, self.dos.structure)
def test_plot_functions(self):
# Disabling latex for testing.
from matplotlib import rc
rc("text", usetex=False)
self.plotter.plot_cv(5, 100, 5, show=False)
self.plotter.plot_entropy(5, 100, 5, show=False)
self.plotter.plot_internal_energy(5, 100, 5, show=False)
self.plotter.plot_helmholtz_free_energy(5, 100, 5, show=False)
self.plotter.plot_thermodynamic_properties(5, 100, 5, show=False, fig_close=True)
if __name__ == "__main__":
unittest.main()
| mit |
CalebHarada/DCT-photometry | Junk/aperture_photometry.py | 1 | 9318 | from astroquery.simbad import Simbad
from astropy.io import fits
from astropy.wcs import WCS
from astropy.stats import mad_std
from astropy.table import Table
from photutils import fit_2dgaussian, CircularAperture, CircularAnnulus, aperture_photometry, find_peaks
import numpy as np
import os
import matplotlib.pyplot as plt
target_simbad = '...' # name in Simbad
directory = '...' # directory containing FITS images
save_to = '...' # save to this directory
filter = '...' # read from FITS header
########################################################################################################################
Simbad.add_votable_fields('coo(fk5)','propermotions')
def calc_electrons(file, simbad):
# Read in relevant information
hdu = fits.open(file)
data = hdu[0].data
error = hdu[2].data
wcs = WCS(hdu[0].header)
targinfo = Simbad.query_object(simbad)
# Determine RA/Dec of target from Simbad query
targinfo_RA = targinfo['RA_fk5'][0]
targRA = [float(targinfo_RA[:2]), float(targinfo_RA[3:5]), float(targinfo_RA[6:])]
RAdeg = targRA[0]*15 + targRA[1]/4 + targRA[2]/240
dRA = targinfo['PMRA'][0] * (15/3600000.0) # minor correction for proper motion
RA = RAdeg + dRA
targinfo_Dec = targinfo['DEC_fk5'][0]
targDec = [float(targinfo_Dec[1:3]), float(targinfo_Dec[4:6]), float(targinfo_Dec[7:])]
Decdeg = (targDec[0]) + targDec[1]/60 + targDec[2]/3600
if targinfo_Dec[0] == '-':
Decdeg = np.negative(Decdeg) # makes negative declinations negative
dDec = targinfo['PMDEC'][0] * (15/3600000.0)
Dec = Decdeg + dDec
# Convert RA/Dec to pixels
pix = wcs.all_world2pix(RA,Dec,0)
    xpix = int(pix[0])  # no manual offset applied here; the commented-out block at the bottom shows a version with hand-tuned offsets
ypix = int(pix[1])
# Trim data to 100x100 pixels near target; fit 2D Gaussian to find center pixel of target
centzoom = data[ypix-45:ypix+45, xpix-45:xpix+45]
centroid = fit_2dgaussian(centzoom)
xcent = xpix - 45 + int(centroid.x_mean.value)
ycent = ypix - 45 + int(centroid.y_mean.value)
#plt.figure()
#plt.imshow(centzoom, origin='lower', cmap='gray', vmin=np.median(data), vmax=np.median(data) + 200)
#plt.colorbar()
#plt.show()
# Find max pixel value in zoomed area, median of data, and median absolute deviation of data
peak = np.max(centzoom)
median = np.median(data) # global background estimate
sigma = mad_std(data) # like std of background, but more resilient to outliers
# Find an appropriate aperture radius
radius = 1
an_mean = peak # peak is just a starting value that will always be greater than the median
while an_mean > median + sigma:
annulus = CircularAnnulus((xcent, ycent), r_in=radius, r_out=radius + 1)
an_sum = aperture_photometry(data,annulus)
an_mean = an_sum['aperture_sum'][0] / annulus.area()
        radius += 1  # radius grows until the annulus mean falls to within 1 sigma of the median
    radius = 35  # override the computed value: a fixed 35-pixel radius is used for every frame
# Draw aperture around target, sum pixel values, and calculate error
aperture = CircularAperture((xcent, ycent), r=radius)
ap_table = aperture_photometry(data, aperture, error=error)
ap_sum = ap_table['aperture_sum'][0]
ap_error = ap_table['aperture_sum_err'][0]
#print ap_table
#plt.imshow(data, origin='lower', interpolation='nearest', vmin=np.median(data), vmax=np.median(data) + 200)
#aperture.plot()
#plt.show()
# Find appropriate sky aperture, sum pixel values, calculate error
def find_sky():
apzoom = data[ycent-250:ycent+250, xcent-250:xcent+250] # trim data to 400x400 region centered on target
errorzoom = error[ycent-250:ycent+250, xcent-250:xcent+250]
rand_x = np.random.randint(0,500) # randomly select pixel in region
rand_y = np.random.randint(0,500)
if rand_x in range(250-3*radius,250+3*radius)\
or rand_y in range(250-3*radius,250+3*radius):
return find_sky() # reselect pixels if aperture overlaps target
elif rand_x not in range(2*radius, 500-2*radius)\
or rand_y not in range(2*radius, 500-2*radius):
return find_sky()
else:
sky = CircularAperture((rand_x,rand_y), r=radius)
sky_table = aperture_photometry(apzoom, sky, error=errorzoom)
sky_sum = sky_table['aperture_sum'][0]
sky_error = sky_table['aperture_sum_err'][0]
sky_x = int(sky_table['xcenter'][0].value)
sky_y = int(sky_table['ycenter'][0].value)
sky_zoom = apzoom[sky_y-radius:sky_y+radius, sky_x - radius:sky_x + radius]
sky_avg = sky_sum/sky.area() # reselect pixels if bright source is in aperture
if np.max(sky_zoom) < median + 5*sigma and sky_avg > 0:
#plt.imshow(apzoom, origin='lower', interpolation='nearest', vmin=np.median(data), vmax=np.median(data) + 200)
#sky.plot()
#plt.show()
return [sky_sum, sky_error]
else:
return find_sky()
# Calculate final electron count with uncertainty
sample_size = 100
list = np.arange(0,sample_size)
sums = [] # list source-sky value of each iteration
errors = []
for i in list:
final_sum = ap_sum - find_sky()[0]
sums.append(final_sum)
final_error = ap_error + find_sky()[1]
errors.append(final_error)
electron_counts = np.mean(sums)
uncert = np.std(sums)
return [electron_counts, uncert, sums, errors] # return mean value of source-sky and propagated error
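# Example call (sketch only; the FITS path and Simbad identifier below are
# placeholders, not files from this project):
#
#     counts, uncert, _, _ = calc_electrons(file='example_frame.fits', simbad='Vega')
#     print counts, uncert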
name_list = []
number_list = []
time_list = []
HA_list = []
ZA_list = []
air_list = []
filter_list = []
exp_list = []
electon_list = []
elec_error_list = []
mag_list = []
mag_error_list = []
all_counts_I = []
all_errors_I = []
Iexp = []
all_counts_R = []
all_errors_R = []
Rexp = []
# Return counts and propagated error from each frame in target directory
for name in os.listdir(directory):
ext = os.path.splitext(name)[1]
if ext == '.fits':
file = directory + '\\' + name
hdu = fits.open(file)[0]
if hdu.header['FILTERS'] == filter:
targname = target_simbad
number = hdu.header['OBSERNO']
time = hdu.header['UT']
hour_angle = hdu.header['HA']
zenith_angle = hdu.header['ZA']
airmass = hdu.header["AIRMASS"]
filter = hdu.header['FILTERS']
exposure = hdu.header['EXPTIME']
result = calc_electrons(file=file, simbad=target_simbad)
electrons = int(round(result[0]))
elec_error = int(round(result[1]))
ins_magnitude = round(-2.5*np.log10(electrons/exposure) + 25, 5)
ins_magnitude_error = round((2.5*elec_error)/(electrons*np.log(10)), 5)
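            # (The error bar above follows from propagating the count error through
            #  m = -2.5*log10(N/t) + 25, which gives dm = 2.5*dN / (N*ln 10).)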
name_list.append(str(targname))
number_list.append(number)
time_list.append(time)
HA_list.append(hour_angle)
ZA_list.append(zenith_angle)
air_list.append(airmass)
filter_list.append(filter)
exp_list.append(exposure)
electon_list.append(electrons)
elec_error_list.append(elec_error)
mag_list.append(ins_magnitude)
mag_error_list.append(ins_magnitude_error)
print number, filter, electrons, elec_error, ins_magnitude, ins_magnitude_error
# Put data in table and save in target directory
columns = 'Target', 'ObsNo', 'UT', 'HA', 'ZA', 'AirMass', 'Filter', 'IntTime', 'IntCounts', 'IC_error', 'Imag', 'IM_error'
data = [name_list, number_list, time_list, HA_list, ZA_list, air_list, filter_list, exp_list, electon_list,
elec_error_list, mag_list, mag_error_list]
data_table = Table(data=data, names=columns, meta={'name': target_simbad})
data_table.show_in_browser(jsviewer=True)
table_name = '%s\\%s_%s_data.txt' % (save_to, target_simbad, filter)
if os.path.isfile(table_name):
print 'Data table already exists for the target \'%s\'' % target_simbad
else:
data_table.write(table_name, format='ascii')
'''
import matplotlib.pyplot as plt
RA = 139.4375
Dec = 46.2069
file = 'D:\\UChicago\\Reduced Data\\2015Jun02\\Part 1\\1RXS_J091744.5+461229\\lmi.1RXS_J091744.5+461229_91_I.fits'
hdu = fits.open(file)
data = hdu[0].data
wcs = WCS(hdu[0].header)
pix = wcs.all_world2pix(RA,Dec,0)
xpix = int(pix[0]) + 5 # adding constants because reference pixel appears to be off (regardless of target)
ypix = int(pix[1]) - 30
# Trim data to 100x100 pixels near target; fit 2D Gaussian to find center pixel of target
centzoom = data[ypix-50:ypix+50, xpix-50:xpix+50]
plt.figure()
plt.imshow(centzoom, origin='lower', cmap='gray', vmin=np.median(data), vmax=np.median(data)+200)
plt.colorbar()
plt.show()
'''
| mit |
Arafatk/sympy | sympy/plotting/tests/test_plot.py | 43 | 8577 | from sympy import (pi, sin, cos, Symbol, Integral, summation, sqrt, log,
oo, LambertW, I, meijerg, exp_polar, Max, Piecewise)
from sympy.plotting import (plot, plot_parametric, plot3d_parametric_line,
plot3d, plot3d_parametric_surface)
from sympy.plotting.plot import unset_show
from sympy.utilities.pytest import skip, raises
from sympy.plotting.experimental_lambdify import lambdify
from sympy.external import import_module
from sympy.core.decorators import wraps
from tempfile import NamedTemporaryFile
import os
import sys
class MockPrint(object):
def write(self, s):
pass
def disable_print(func, *args, **kwargs):
@wraps(func)
def wrapper(*args, **kwargs):
sys.stdout = MockPrint()
func(*args, **kwargs)
sys.stdout = sys.__stdout__
return wrapper
unset_show()
# XXX: We could implement this as a context manager instead
# That would need rewriting the plot_and_save() function
# entirely
class TmpFileManager:
tmp_files = []
@classmethod
def tmp_file(cls, name=''):
cls.tmp_files.append(NamedTemporaryFile(prefix=name, suffix='.png').name)
return cls.tmp_files[-1]
@classmethod
def cleanup(cls):
        list(map(os.remove, cls.tmp_files))  # list() forces evaluation so the files are removed on Python 3 as well
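# A rough sketch of the context-manager variant suggested in the XXX note above
# (assumption: plot_and_save() would be restructured around it; not done here):
#
#     from contextlib import contextmanager
#
#     @contextmanager
#     def managed_tmp_files():
#         names = []
#         try:
#             yield names
#         finally:
#             for n in names:
#                 os.remove(n)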
def plot_and_save(name):
tmp_file = TmpFileManager.tmp_file
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
###
# Examples from the 'introduction' notebook
###
p = plot(x)
p = plot(x*sin(x), x*cos(x))
p.extend(p)
p[0].line_color = lambda a: a
p[1].line_color = 'b'
p.title = 'Big title'
p.xlabel = 'the x axis'
p[1].label = 'straight line'
p.legend = True
p.aspect_ratio = (1, 1)
p.xlim = (-15, 20)
p.save(tmp_file('%s_basic_options_and_colors' % name))
p.extend(plot(x + 1))
p.append(plot(x + 3, x**2)[1])
p.save(tmp_file('%s_plot_extend_append' % name))
p[2] = plot(x**2, (x, -2, 3))
p.save(tmp_file('%s_plot_setitem' % name))
p = plot(sin(x), (x, -2*pi, 4*pi))
p.save(tmp_file('%s_line_explicit' % name))
p = plot(sin(x))
p.save(tmp_file('%s_line_default_range' % name))
p = plot((x**2, (x, -5, 5)), (x**3, (x, -3, 3)))
p.save(tmp_file('%s_line_multiple_range' % name))
raises(ValueError, lambda: plot(x, y))
p = plot(Piecewise((1, x > 0), (0, True)),(x,-1,1))
p.save(tmp_file('%s_plot_piecewise' % name))
#parametric 2d plots.
#Single plot with default range.
plot_parametric(sin(x), cos(x)).save(tmp_file())
#Single plot with range.
p = plot_parametric(sin(x), cos(x), (x, -5, 5))
p.save(tmp_file('%s_parametric_range' % name))
#Multiple plots with same range.
p = plot_parametric((sin(x), cos(x)), (x, sin(x)))
p.save(tmp_file('%s_parametric_multiple' % name))
#Multiple plots with different ranges.
p = plot_parametric((sin(x), cos(x), (x, -3, 3)), (x, sin(x), (x, -5, 5)))
p.save(tmp_file('%s_parametric_multiple_ranges' % name))
#depth of recursion specified.
p = plot_parametric(x, sin(x), depth=13)
p.save(tmp_file('%s_recursion_depth' % name))
#No adaptive sampling.
p = plot_parametric(cos(x), sin(x), adaptive=False, nb_of_points=500)
p.save(tmp_file('%s_adaptive' % name))
#3d parametric plots
p = plot3d_parametric_line(sin(x), cos(x), x)
p.save(tmp_file('%s_3d_line' % name))
p = plot3d_parametric_line(
(sin(x), cos(x), x, (x, -5, 5)), (cos(x), sin(x), x, (x, -3, 3)))
p.save(tmp_file('%s_3d_line_multiple' % name))
p = plot3d_parametric_line(sin(x), cos(x), x, nb_of_points=30)
p.save(tmp_file('%s_3d_line_points' % name))
# 3d surface single plot.
p = plot3d(x * y)
p.save(tmp_file('%s_surface' % name))
# Multiple 3D plots with same range.
p = plot3d(-x * y, x * y, (x, -5, 5))
p.save(tmp_file('%s_surface_multiple' % name))
# Multiple 3D plots with different ranges.
p = plot3d(
(x * y, (x, -3, 3), (y, -3, 3)), (-x * y, (x, -3, 3), (y, -3, 3)))
p.save(tmp_file('%s_surface_multiple_ranges' % name))
# Single Parametric 3D plot
p = plot3d_parametric_surface(sin(x + y), cos(x - y), x - y)
p.save(tmp_file('%s_parametric_surface' % name))
# Multiple Parametric 3D plots.
p = plot3d_parametric_surface(
(x*sin(z), x*cos(z), z, (x, -5, 5), (z, -5, 5)),
(sin(x + y), cos(x - y), x - y, (x, -5, 5), (y, -5, 5)))
p.save(tmp_file('%s_parametric_surface' % name))
###
# Examples from the 'colors' notebook
###
p = plot(sin(x))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_line_arity2' % name))
p = plot(x*sin(x), x*cos(x), (x, 0, 10))
p[0].line_color = lambda a: a
p.save(tmp_file('%s_colors_param_line_arity1' % name))
p[0].line_color = lambda a, b: a
p.save(tmp_file('%s_colors_param_line_arity2a' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_param_line_arity2b' % name))
p = plot3d_parametric_line(sin(x) + 0.1*sin(x)*cos(7*x),
cos(x) + 0.1*cos(x)*cos(7*x),
0.1*sin(7*x),
(x, 0, 2*pi))
p[0].line_color = lambda a: sin(4*a)
p.save(tmp_file('%s_colors_3d_line_arity1' % name))
p[0].line_color = lambda a, b: b
p.save(tmp_file('%s_colors_3d_line_arity2' % name))
p[0].line_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_3d_line_arity3' % name))
p = plot3d(sin(x)*y, (x, 0, 6*pi), (y, -5, 5))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_surface_arity1' % name))
p[0].surface_color = lambda a, b: b
p.save(tmp_file('%s_colors_surface_arity2' % name))
p[0].surface_color = lambda a, b, c: c
p.save(tmp_file('%s_colors_surface_arity3a' % name))
p[0].surface_color = lambda a, b, c: sqrt((a - 3*pi)**2 + b**2)
p.save(tmp_file('%s_colors_surface_arity3b' % name))
p = plot3d_parametric_surface(x * cos(4 * y), x * sin(4 * y), y,
(x, -1, 1), (y, -1, 1))
p[0].surface_color = lambda a: a
p.save(tmp_file('%s_colors_param_surf_arity1' % name))
p[0].surface_color = lambda a, b: a*b
p.save(tmp_file('%s_colors_param_surf_arity2' % name))
p[0].surface_color = lambda a, b, c: sqrt(a**2 + b**2 + c**2)
p.save(tmp_file('%s_colors_param_surf_arity3' % name))
###
# Examples from the 'advanced' notebook
###
i = Integral(log((sin(x)**2 + 1)*sqrt(x**2 + 1)), (x, 0, y))
p = plot(i, (y, 1, 5))
p.save(tmp_file('%s_advanced_integral' % name))
s = summation(1/x**y, (x, 1, oo))
p = plot(s, (y, 2, 10))
p.save(tmp_file('%s_advanced_inf_sum' % name))
p = plot(summation(1/x, (x, 1, y)), (y, 2, 10), show=False)
p[0].only_integers = True
p[0].steps = True
p.save(tmp_file('%s_advanced_fin_sum' % name))
###
# Test expressions that can not be translated to np and generate complex
# results.
###
plot(sin(x) + I*cos(x)).save(tmp_file())
plot(sqrt(sqrt(-x))).save(tmp_file())
plot(LambertW(x)).save(tmp_file())
plot(sqrt(LambertW(x))).save(tmp_file())
#Characteristic function of a StudentT distribution with nu=10
plot((meijerg(((1 / 2,), ()), ((5, 0, 1 / 2), ()), 5 * x**2 * exp_polar(-I*pi)/2)
+ meijerg(((1/2,), ()), ((5, 0, 1/2), ()),
5*x**2 * exp_polar(I*pi)/2)) / (48 * pi), (x, 1e-6, 1e-2)).save(tmp_file())
def test_matplotlib():
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
try:
plot_and_save('test')
finally:
# clean up
TmpFileManager.cleanup()
else:
skip("Matplotlib not the default backend")
# Tests for exception handling in experimental_lambdify
def test_experimental_lambify():
x = Symbol('x')
f = lambdify([x], Max(x, 5))
# XXX should f be tested? If f(2) is attempted, an
# error is raised because a complex produced during wrapping of the arg
# is being compared with an int.
assert Max(2, 5) == 5
assert Max(5, 7) == 7
x = Symbol('x-3')
f = lambdify([x], x + 1)
assert f(1) == 2
@disable_print
def test_append_issue_7140():
x = Symbol('x')
p1 = plot(x)
p2 = plot(x**2)
p3 = plot(x + 2)
# append a series
p2.append(p1[0])
assert len(p2._series) == 2
with raises(TypeError):
p1.append(p2)
with raises(TypeError):
p1.append(p2._series)
| bsd-3-clause |
keflavich/APEX_CMZ_H2CO | plot_codes/tmap_figure.py | 2 | 12670 | import pylab as pl
import numpy as np
import aplpy
import os
import copy
from astropy import log
from paths import h2copath, figurepath
import paths
import matplotlib
from scipy import stats as ss
from astropy.io import fits
matplotlib.rc_file(paths.pcpath('pubfiguresrc'))
pl.ioff()
# Close these figures so we can remake them in the appropriate size
for fignum in (4,5,6,7):
pl.close(fignum)
cmap = pl.cm.RdYlBu_r
figsize = (20,10)
small_recen = dict(x=0.3, y=-0.03,width=1.05,height=0.27)
big_recen = dict(x=0.55, y=-0.075,width=2.3,height=0.40)
sgrb2x = [000.6773, 0.6578, 0.6672]
sgrb2y = [-00.0290, -00.0418, -00.0364]
vmin=10
vmax = 200
dustcolumn = '/Users/adam/work/gc/gcmosaic_column_conv36.fits'
# most of these come from make_ratiotem_cubesims
toloop = zip((
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens3e4.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens3e4.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e4.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e4.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e4_abund1e-8.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e4_abund1e-8.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e4_abund1e-10.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e4_abund1e-10.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e5.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e5.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e4_masked.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e4_masked.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens3e4_masked.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens3e4_masked.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e5_masked.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e5_masked.fits',
'TemperatureCube_DendrogramObjects{0}_leaves_integ.fits',
'TemperatureCube_DendrogramObjects{0}_leaves_integ_weighted.fits',
'TemperatureCube_DendrogramObjects{0}_integ.fits',
'TemperatureCube_DendrogramObjects{0}_integ_weighted.fits'),
('dens3e4', 'dens3e4_weighted',
'dens1e4', 'dens1e4_weighted',
'dens1e4_abund1e-8', 'dens1e4_abund1e-8_weighted',
'dens1e4_abund1e-10', 'dens1e4_abund1e-10_weighted',
'dens1e5', 'dens1e5_weighted',
'dens1e4_masked','dens1e4_weighted_masked',
'dens3e4_masked','dens3e4_weighted_masked',
'dens1e5_masked','dens1e5_weighted_masked',
'dendro_leaf','dendro_leaf_weighted',
'dendro','dendro_weighted'))
#for vmax,vmax_str in zip((100,200),("_vmax100","")):
for vmax,vmax_str in zip((200,),("",)):
for ftemplate,outtype in toloop:
for smooth in ("","_smooth",):#"_vsmooth"):
log.info(ftemplate.format(smooth)+" "+outtype)
fig = pl.figure(4, figsize=figsize)
fig.clf()
F = aplpy.FITSFigure(h2copath+ftemplate.format(smooth),
convention='calabretta',
figure=fig)
cm = copy.copy(cmap)
cm.set_bad((0.5,)*3)
F.show_colorscale(cmap=cm,vmin=vmin,vmax=vmax)
F.set_tick_labels_format('d.dd','d.dd')
F.recenter(**small_recen)
peaksn = os.path.join(h2copath,'APEX_H2CO_303_202{0}_bl_mask_integ.fits'.format(smooth))
#F.show_contour(peaksn, levels=[4,7,11,20,38], colors=[(0.25,0.25,0.25,0.5)]*5, #smooth=3,
# linewidths=[1.0]*5,
# zorder=10, convention='calabretta')
#color = (0.25,)*3
#F.show_contour(peaksn, levels=[4,7,11,20,38], colors=[color + (alpha,) for alpha in (0.9,0.6,0.3,0.1,0.0)], #smooth=3,
# filled=True,
# #linewidths=[1.0]*5,
# zorder=10, convention='calabretta')
color = (0.5,)*3 # should be same as background #888
F.show_contour(peaksn, levels=[-1,0]+np.logspace(0.20,2).tolist(),
colors=[(0.5,0.5,0.5,1)]*2 + [color + (alpha,) for alpha in np.exp(-(np.logspace(0.20,2)-1.7)**2/(2.5**2*2.))], #smooth=3,
filled=True,
#linewidths=[1.0]*5,
layer='mask',
zorder=10, convention='calabretta',
rasterized=True)
F.add_colorbar()
F.colorbar.set_axis_label_text('T (K)')
F.colorbar.set_axis_label_font(size=18)
F.colorbar.set_label_properties(size=16)
F.show_markers(sgrb2x, sgrb2y, color='k', facecolor='k', s=250,
edgecolor='k', alpha=0.9)
F.save(os.path.join(figurepath, "big_maps", 'lores{0}{1}{2}_tmap_withmask.pdf'.format(smooth, outtype, vmax_str)))
F.recenter(**big_recen)
F.save(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_withmask.pdf'.format(smooth, outtype, vmax_str)))
log.info(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_withmask.pdf'.format(smooth, outtype, vmax_str)))
F.show_contour(dustcolumn,
levels=[5], colors=[(0,0,0,0.5)], zorder=15,
alpha=0.5,
linewidths=[0.5],
layer='dustcontour')
F.recenter(**small_recen)
F.save(os.path.join(figurepath, "big_maps", 'lores{0}{1}{2}_tmap_withcontours.pdf'.format(smooth, outtype, vmax_str)))
F.recenter(**big_recen)
F.save(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_withcontours.pdf'.format(smooth, outtype, vmax_str)))
log.info(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_withcontours.pdf'.format(smooth, outtype, vmax_str)))
F.hide_layer('mask')
F.recenter(**small_recen)
F.save(os.path.join(figurepath, "big_maps", 'lores{0}{1}{2}_tmap_nomask_withcontours.pdf'.format(smooth, outtype, vmax_str)))
F.recenter(**big_recen)
F.save(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_nomask_withcontours.pdf'.format(smooth, outtype, vmax_str)))
fig7 = pl.figure(7, figsize=figsize)
fig7.clf()
Fsn = aplpy.FITSFigure(peaksn, convention='calabretta', figure=fig7)
Fsn.show_grayscale(vmin=0, vmax=10, stretch='linear', invert=True)
Fsn.add_colorbar()
Fsn.colorbar.set_axis_label_text('Peak S/N')
Fsn.colorbar.set_axis_label_font(size=18)
Fsn.colorbar.set_label_properties(size=16)
Fsn.set_tick_labels_format('d.dd','d.dd')
Fsn.recenter(**big_recen)
Fsn.save(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_peaksn.pdf'.format(smooth, outtype, vmax_str)))
F.hide_layer('dustcontour')
dusttemperature = '/Users/adam/work/gc/gcmosaic_temp_conv36.fits'
F.show_contour(dusttemperature,
levels=[20,25],
colors=[(0,0,x,0.5) for x in [0.9,0.7,0.6,0.2]], zorder=20)
F.recenter(**small_recen)
F.save(os.path.join(figurepath, "big_maps",'lores{0}{1}{2}_tmap_withtdustcontours.pdf'.format(smooth, outtype, vmax_str)))
F.recenter(**big_recen)
F.save(os.path.join(figurepath, "big_maps",'big_lores{0}{1}{2}_tmap_withtdustcontours.pdf'.format(smooth, outtype, vmax_str)))
log.info(os.path.join(figurepath, "big_maps",'big_lores{0}{1}{2}_tmap_withtdustcontours.pdf'.format(smooth, outtype, vmax_str)))
im = fits.getdata(h2copath+ftemplate.format(smooth))
data = im[np.isfinite(im)]
fig9 = pl.figure(9)
fig9.clf()
ax9 = fig9.gca()
h,l,p = ax9.hist(data, bins=np.linspace(0,300), alpha=0.5)
shape, loc, scale = ss.lognorm.fit(data, floc=0)
# from http://nbviewer.ipython.org/url/xweb.geos.ed.ac.uk/~jsteven5/blog/lognormal_distributions.ipynb
mu = np.log(scale) # Mean of log(X) [but I want mean(x)]
sigma = shape # Standard deviation of log(X)
M = np.exp(mu) # Geometric mean == median
s = np.exp(sigma) # Geometric standard deviation
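            # (For reference: the arithmetic mean of this lognormal would be
            #  scale * exp(sigma**2 / 2); only the geometric mean/std above are
            #  used in the plot label below.)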
lnf = ss.lognorm(s=shape, loc=loc, scale=scale)
pdf = lnf.pdf(np.arange(300))
label1 = ("$\sigma_{{\mathrm{{ln}} x}} = {0:0.2f}$\n"
"$\mu_x = {1:0.2f}$\n"
"$\sigma_x = {2:0.2f}$".format(sigma, scale,s))
pm = np.abs(ss.lognorm.interval(0.683, s=shape, loc=0, scale=scale) - scale)
label2 = ("$x = {0:0.1f}^{{+{1:0.1f}}}_{{-{2:0.1f}}}$\n"
"$\sigma_{{\mathrm{{ln}} x}} = {3:0.1f}$\n"
.format(scale,
pm[1],
pm[0],
sigma,
))
ax9.plot(np.arange(300), pdf*h.max()/pdf.max(), linewidth=4, alpha=0.5,
label=label2)
ax9.legend(loc='best')
ax9.set_xlim(0,300)
fig9.savefig(os.path.join(figurepath, "big_maps",
'histogram_{0}{1}{2}_tmap.pdf'.format(smooth,
outtype, vmax_str)),
bbox_inches='tight')
#F.show_contour('h2co218222_all.fits', levels=[1,7,11,20,38], colors=['g']*5, smooth=1, zorder=5)
#F.show_contour(datapath+'APEX_H2CO_merge_high_smooth_noise.fits', levels=[0.05,0.1], colors=['#0000FF']*2, zorder=3, convention='calabretta')
#F.show_contour(datapath+'APEX_H2CO_merge_high_nhits.fits', levels=[9], colors=['#0000FF']*2, zorder=3, convention='calabretta',smooth=3)
#F.show_regions('2014_expansion_targets_simpler.reg')
#F.save('CMZ_H2CO_observed_planned.pdf')
#F.show_rgb(background, wcs=wcs)
#F.save('CMZ_H2CO_observed_planned_colorful.pdf')
fig = pl.figure(5, figsize=figsize)
fig.clf()
F2 = aplpy.FITSFigure(dusttemperature, convention='calabretta', figure=fig)
F2.show_colorscale(cmap=pl.cm.hot, vmin=10, vmax=40)
F2.add_colorbar()
F2.show_contour(h2copath+'H2CO_321220_to_303202_smooth_bl_integ_temperature.fits',
convention='calabretta',
levels=[30,75,100,150],
cmap=pl.cm.BuGn)
F2.recenter(**small_recen)
F2.show_markers(sgrb2x, sgrb2y, color='k', facecolor='k', s=250,
edgecolor='k', alpha=0.9)
F2.save(os.path.join(figurepath, "big_maps",'H2COtemperatureOnDust.pdf'))
F2.recenter(**big_recen)
F2.save(os.path.join(figurepath, "big_maps",'big_H2COtemperatureOnDust.pdf'))
for vmax in (100,200):
fig = pl.figure(6, figsize=figsize)
fig.clf()
F = aplpy.FITSFigure('/Users/adam/work/gc/Tkin-GC.fits.gz',
convention='calabretta',
figure=fig)
cm = copy.copy(cmap)
cm.set_bad((0.5,)*3)
F.show_colorscale(cmap=cm,vmin=vmin,vmax=vmax)
F.set_tick_labels_format('d.dd','d.dd')
F.recenter(**small_recen)
F.add_colorbar()
F.colorbar.set_axis_label_text('T (K)')
F.colorbar.set_axis_label_font(size=18)
F.colorbar.set_label_properties(size=16)
F.show_markers(sgrb2x, sgrb2y, color='k', facecolor='k', s=250,
edgecolor='k', alpha=0.9)
F.save(os.path.join(figurepath, "big_maps", 'ott2014_nh3_tmap_15to{0}.pdf'.format(vmax)))
F.show_colorscale(cmap=cm,vmin=vmin,vmax=80)
F.save(os.path.join(figurepath, "big_maps", 'ott2014_nh3_tmap_15to80.pdf'))
F.show_contour(dustcolumn,
levels=[5], colors=[(0,0,0,0.5)], zorder=15,
alpha=0.5,
linewidths=[0.5],
layer='dustcontour')
F.save(os.path.join(figurepath, "big_maps", 'ott2014_nh3_tmap_15to80_withcontours.pdf'))
F.show_colorscale(cmap=cm,vmin=vmin,vmax=vmax)
F.save(os.path.join(figurepath, "big_maps", 'ott2014_nh3_tmap_15to{0}_withcontours.pdf'.format(vmax)))
| bsd-3-clause |
MechCoder/scikit-learn | sklearn/linear_model/tests/test_base.py | 83 | 15089 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from itertools import product
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import _preprocess_data
from sklearn.linear_model.base import sparse_center_data, center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
rng = np.random.RandomState(0)
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [1])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
reg = LinearRegression()
reg.fit(X, Y)
assert_array_almost_equal(reg.coef_, [0])
assert_array_almost_equal(reg.intercept_, [0])
assert_array_almost_equal(reg.predict(X), [0])
def test_linear_regression_sample_weights():
# TODO: loop over sparse data as well
rng = np.random.RandomState(0)
# It would not work with under-determined systems
for n_samples, n_features in ((6, 5), ):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
for intercept in (True, False):
# LinearRegression with explicit sample_weight
reg = LinearRegression(fit_intercept=intercept)
reg.fit(X, y, sample_weight=sample_weight)
coefs1 = reg.coef_
inter1 = reg.intercept_
assert_equal(reg.coef_.shape, (X.shape[1], )) # sanity checks
assert_greater(reg.score(X, y), 0.5)
# Closed form of the weighted least square
# theta = (X^T W X)^(-1) * X^T W y
W = np.diag(sample_weight)
if intercept is False:
X_aug = X
else:
dummy_column = np.ones(shape=(n_samples, 1))
X_aug = np.concatenate((dummy_column, X), axis=1)
coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
X_aug.T.dot(W).dot(y))
if intercept is False:
assert_array_almost_equal(coefs1, coefs2)
else:
assert_array_almost_equal(coefs1, coefs2[1:])
assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
reg = LinearRegression()
# make sure the "OK" sample weights actually work
reg.fit(X, y, sample_weights_OK)
reg.fit(X, y, sample_weights_OK_1)
reg.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
# Test that linear regression also works with sparse data
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
reg = LinearRegression(fit_intercept=True)
reg.fit((X), Y)
assert_equal(reg.coef_.shape, (2, n_features))
Y_pred = reg.predict(X)
reg.fit(X, y)
y_pred = reg.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
# Test multiple-outcome linear regressions with sparse data
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_preprocess_data():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
expected_X_norm = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_preprocess_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [X, sparse.csc_matrix(X)]
for X in args:
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_preprocess_data_weighted():
n_samples = 200
n_features = 2
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
expected_X_norm = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
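    # (A fully weighted spread would instead be
    #  np.sqrt(X.shape[0] * np.average((X - expected_X_mean) ** 2, axis=0,
    #                                  weights=sample_weight));
    #  noted here only, mirroring the XXX comment above.)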
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_preprocess_data_with_return_mean():
n_samples = 200
n_features = 2
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
expected_X_norm = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=False, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=False,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_norm = \
_preprocess_data(X, y, fit_intercept=True, normalize=True,
return_mean=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_norm, expected_X_norm)
assert_array_almost_equal(Xt.A, XA / expected_X_norm)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_preprocess_data():
# Test output format of _preprocess_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = _preprocess_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
@ignore_warnings # all deprecation warnings
def test_deprecation_center_data():
n_samples = 200
n_features = 2
w = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
param_grid = product([True, False], [True, False], [True, False],
[None, w])
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
XX = X.copy() # such that we can try copy=False as well
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
XX = X.copy()
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(XX, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight)
assert_array_almost_equal(X1, X2)
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
# Sparse cases
X = sparse.csr_matrix(X)
for (fit_intercept, normalize, copy, sample_weight) in param_grid:
X1, y1, X1_mean, X1_var, y1_mean = \
center_data(X, y, fit_intercept=fit_intercept, normalize=normalize,
copy=copy, sample_weight=sample_weight)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, copy=copy,
sample_weight=sample_weight, return_mean=False)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
for (fit_intercept, normalize) in product([True, False], [True, False]):
X1, y1, X1_mean, X1_var, y1_mean = \
sparse_center_data(X, y, fit_intercept=fit_intercept,
normalize=normalize)
X2, y2, X2_mean, X2_var, y2_mean = \
_preprocess_data(X, y, fit_intercept=fit_intercept,
normalize=normalize, return_mean=True)
assert_array_almost_equal(X1.toarray(), X2.toarray())
assert_array_almost_equal(y1, y2)
assert_array_almost_equal(X1_mean, X2_mean)
assert_array_almost_equal(X1_var, X2_var)
assert_array_almost_equal(y1_mean, y2_mean)
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | sklearn/metrics/scorer.py | 13 | 13090 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
Trained estimator to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
    ``mean_squared_error``, ``adjusted_rand_score`` or ``average_precision``
and returns a callable that scores an estimator's output.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
        For example ``average_precision`` or the area under the ROC curve
        cannot be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
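# A minimal illustrative sketch (not part of the original module): wrapping a
# custom loss with make_scorer so that model selection treats lower losses as
# better via the internal sign flip. The loss function below is an assumption
# invented for the example.
def _example_custom_loss_scorer():
    import numpy as np
    def _mean_absolute_loss(y, y_pred):
        return np.mean(np.abs(np.asarray(y) - np.asarray(y_pred)))
    # greater_is_better=False makes the returned scorer negate the loss.
    return make_scorer(_mean_absolute_loss, greater_is_better=False)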
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
| bsd-3-clause |
wlamond/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 127 | 1732 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
rknLA/sms-tools | lectures/04-STFT/plots-code/spectrogram.py | 19 | 1174 | import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import stft as STFT
import utilFunctions as UF
import matplotlib.pyplot as plt
from scipy.signal import hamming
from scipy.fftpack import fft
import math
(fs, x) = UF.wavread('../../../sounds/piano.wav')
w = np.hamming(1001)
N = 1024
H = 256
mX, pX = STFT.stftAnal(x, fs, w, N, H)
plt.figure(1, figsize=(9.5, 6))
plt.subplot(211)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(N/2+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX))
plt.title('mX (piano.wav), M=1001, N=1024, H=256')
plt.autoscale(tight=True)
plt.subplot(212)
numFrames = int(pX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(N/2+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.diff(np.transpose(pX),axis=0))
plt.title('pX derivative (piano.wav), M=1001, N=1024, H=256')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('spectrogram.png')
plt.show()
| agpl-3.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/tests/dtypes/test_missing.py | 7 | 12126 | # -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(object):
def test_0d_array(self):
assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isnull(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_isnull(self):
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
        assert isnull(float('nan'))
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isnull(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
with catch_warnings(record=True):
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
with catch_warnings(record=True):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists(self):
result = isnull([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isnull(['foo', 'bar'])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isnull([u('foo'), u('bar')])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_nat(self):
result = isnull([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_numpy_nat(self):
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime(self):
assert not isnull(datetime.now())
assert notnull(datetime.now())
idx = date_range('1/1/1990', periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notnull(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
assert mask[0]
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
mask = isnull(pidx[1:])
exp = np.zeros(len(mask), dtype=bool)
tm.assert_numpy_array_equal(mask, exp)
def test_datetime_other_units(self):
idx = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-02'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal(notnull(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_timedelta_other_units(self):
idx = pd.TimedeltaIndex(['1 days', 'NaT', '2 days'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['timedelta64[D]', 'timedelta64[h]', 'timedelta64[m]',
'timedelta64[s]', 'timedelta64[ms]', 'timedelta64[us]',
'timedelta64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal(notnull(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_period(self):
idx = pd.PeriodIndex(['2011-01', 'NaT', '2012-01'], freq='M')
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(idx)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(idx, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_array_equivalent():
assert array_equivalent(np.array([np.nan, np.nan]),
np.array([np.nan, np.nan]))
assert array_equivalent(np.array([np.nan, 1, np.nan]),
np.array([np.nan, 1, np.nan]))
assert array_equivalent(np.array([np.nan, None], dtype='object'),
np.array([np.nan, None], dtype='object'))
assert array_equivalent(np.array([np.nan, 1 + 1j], dtype='complex'),
np.array([np.nan, 1 + 1j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1 + 1j], dtype='complex'), np.array(
[np.nan, 1 + 2j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan]))
assert not array_equivalent(
np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e']))
assert array_equivalent(Float64Index([0, np.nan]),
Float64Index([0, np.nan]))
assert not array_equivalent(
Float64Index([0, np.nan]), Float64Index([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan]),
DatetimeIndex([0, np.nan]))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]))
assert array_equivalent(TimedeltaIndex([0, np.nan]),
TimedeltaIndex([0, np.nan]))
assert not array_equivalent(
TimedeltaIndex([0, np.nan]), TimedeltaIndex([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan], tz='US/Eastern'),
DatetimeIndex([0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz='US/Eastern'), DatetimeIndex(
[1, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex(
[0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz='CET'), DatetimeIndex(
[0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan]))
def test_array_equivalent_compat():
# see gh-13388
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
assert (array_equivalent(m, n, strict_nan=True))
assert (array_equivalent(m, n, strict_nan=False))
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (4, 3)], dtype=[('a', int), ('b', float)])
assert (not array_equivalent(m, n, strict_nan=True))
assert (not array_equivalent(m, n, strict_nan=False))
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (3, 4)], dtype=[('b', int), ('a', float)])
assert (not array_equivalent(m, n, strict_nan=True))
assert (not array_equivalent(m, n, strict_nan=False))
def test_array_equivalent_str():
for dtype in ['O', 'S', 'U']:
assert array_equivalent(np.array(['A', 'B'], dtype=dtype),
np.array(['A', 'B'], dtype=dtype))
assert not array_equivalent(np.array(['A', 'B'], dtype=dtype),
np.array(['A', 'X'], dtype=dtype))
def test_na_value_for_dtype():
for dtype in [np.dtype('M8[ns]'), np.dtype('m8[ns]'),
DatetimeTZDtype('datetime64[ns, US/Eastern]')]:
assert na_value_for_dtype(dtype) is NaT
for dtype in ['u1', 'u2', 'u4', 'u8',
'i1', 'i2', 'i4', 'i8']:
assert na_value_for_dtype(np.dtype(dtype)) == 0
for dtype in ['bool']:
assert na_value_for_dtype(np.dtype(dtype)) is False
for dtype in ['f2', 'f4', 'f8']:
assert np.isnan(na_value_for_dtype(np.dtype(dtype)))
for dtype in ['O']:
assert np.isnan(na_value_for_dtype(np.dtype(dtype)))
| mit |
Clyde-fare/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
yaukwankiu/armor | tests/modifiedMexicanHatTest18_march2014.py | 1 | 2792 | # modifiedMexicanHatTest18.py
# two 3d charts in one
"""
1. load xyz1 for compref(radar)
2. load xyz2 for wrf
3. fix xyz2
4. charting 2 in one
"""
inputFolder='/media/TOSHIBA EXT/ARMOR/labLogs/2014-5-26-modifiedMexicanHatTest17_Numerical_Spectrum_for_Typhoon_Kong-Rey_RADAR/'
dataSource = "Numerical_Spectrum_for_Typhoon_Kong-Rey_RADAR"
i=121
import pickle
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
xyz = pickle.load(open(inputFolder+'XYZ.pydump','r'))
X = xyz['X']
Y = xyz['Y']
Z = xyz['Z']
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, np.log2(Y), np.log2(Z), rstride=1, cstride=1) #key line
plt.title(dataSource+ " " + str(i) + "DBZ images\n"+\
"x-axis: response intensity(from 0 to 20)\n"+\
"y-axis: log_2(sigma)\n"+\
"z-axis: log_2(count)\n")
plt.xlabel('response intensity')
plt.ylabel('log2(sigma)')
fig.show()
xyz1 = xyz
dataSource1 = dataSource
i1=i
#################################################################################
inputFolder='/media/TOSHIBA EXT/ARMOR/labLogs/2014-5-16-modifiedMexicanHatTest15_march2014/'
dataSource= "Numerical_Spectrum_for_Typhoon_Kong-Rey_march2014_sigmaPreprocessing10"
i=399
xyz = pickle.load(open(inputFolder+'XYZ.pydump','r'))
X = xyz['X']
Y = xyz['Y']
Z = xyz['Z']
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, np.log2(Y), np.log2(Z), rstride=1, cstride=1) #key line
plt.title(dataSource+ " " + str(i) + "DBZ images\n"+\
"x-axis: response intensity(from 0 to 20)\n"+\
"y-axis: log_2(sigma)\n"+\
"z-axis: log_2(count)\n")
plt.xlabel('response intensity')
plt.ylabel('log2(sigma)')
fig.show()
xyz2=xyz
dataSource2 = dataSource
i2=i
##############################################################################
xyz2['X'] +=2 #in log2 scale
xyz2['Z'] +=2
plt.close()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X = xyz1['X']
Y = xyz1['Y']
Z = xyz1['Z']/121.
Z1 = Z
ax.plot_wireframe(X, np.log2(Y), np.log2(Z), rstride=1, cstride=1) #key line
X = xyz2['X']
Y = xyz2['Y']
Z = xyz2['Z']/399.
Z2 = Z
ax.plot_wireframe(X, np.log2(Y), np.log2(Z), rstride=1, cstride=1, colors="green") #key line
ax.plot_wireframe(X, np.log2(Y), (np.log2(Z1)-np.log2(Z2))*1, rstride=1, cstride=1, colors="red") #key line
plt.title("Blue: Averaged "+dataSource1+ " " + str(i1) + "DBZ images\n"+\
"Green: Averaged "+dataSource2+ " " + str(i2) + "DBZ images\n"+\
"Red: 1 x Difference of Blue and Green"
"y-axis: log_2(sigma)\n"+\
"z-axis: log_2(count)\n")
plt.xlabel('response intensity')
plt.ylabel('log2(sigma)')
fig.show()
| cc0-1.0 |
VipinRathor/zeppelin | python/src/main/resources/python/backend_zinline.py | 61 | 11831 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file provides a static (non-interactive) matplotlib plotting backend
# for zeppelin notebooks for use with the python/pyspark interpreters
from __future__ import print_function
import sys
import uuid
import warnings
import base64
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import mpl_config
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backends.backend_agg import new_figure_manager, FigureCanvasAgg
from matplotlib.backend_bases import ShowBase, FigureManagerBase
from matplotlib.figure import Figure
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
class Show(ShowBase):
"""
A callable object that displays the figures to the screen. Valid kwargs
include figure width and height (in units supported by the div tag), block
(allows users to override blocking behavior regardless of whether or not
interactive mode is enabled, currently unused) and close (Implicitly call
matplotlib.pyplot.close('all') with each call to show()).
"""
def __call__(self, close=None, block=None, **kwargs):
if close is None:
close = mpl_config.get('close')
try:
managers = Gcf.get_all_fig_managers()
if not managers:
return
# Tell zeppelin that the output will be html using the %html magic
# We want to do this only once to avoid seeing "%html" printed
# directly to the outout when multiple figures are displayed from
# one paragraph.
if mpl_config.get('angular'):
print('%angular')
else:
print('%html')
# Show all open figures
for manager in managers:
manager.show(**kwargs)
finally:
# This closes all the figures if close is set to True.
if close and Gcf.get_all_fig_managers():
Gcf.destroy_all()
class FigureCanvasZInline(FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
"""
def get_bytes(self, **kwargs):
"""
Get the byte representation of the figure.
Should only be used with jpg/png formats.
"""
# Make sure format is correct
fmt = kwargs.get('format', mpl_config.get('format'))
if fmt == 'svg':
raise ValueError("get_bytes() does not support svg, use png or jpg")
# Express the image as bytes
buf = BytesIO()
self.print_figure(buf, **kwargs)
fmt = fmt.encode()
if sys.version_info >= (3, 4) and sys.version_info < (3, 5):
byte_str = bytes("data:image/%s;base64," %fmt, "utf-8")
else:
byte_str = b"data:image/%s;base64," %fmt
byte_str += base64.b64encode(buf.getvalue())
# Python3 forces all strings to default to unicode, but for raster image
# formats (eg png, jpg), we want to work with bytes. Thus this step is
        # needed to ensure compatibility for all python versions.
byte_str = byte_str.decode('ascii')
buf.close()
return byte_str
def get_svg(self, **kwargs):
"""
Get the svg representation of the figure.
Should only be used with svg format.
"""
# Make sure format is correct
fmt = kwargs.get('format', mpl_config.get('format'))
if fmt != 'svg':
raise ValueError("get_svg() does not support png or jpg, use svg")
# For SVG the data string has to be unicode, not bytes
buf = StringIO()
self.print_figure(buf, **kwargs)
svg_str = buf.getvalue()
buf.close()
return svg_str
def draw_idle(self, *args, **kwargs):
"""
Called when the figure gets updated (eg through a plotting command).
        This is overridden to allow open figures to be reshown after they
are updated when mpl_config.get('close') is False.
"""
if not self._is_idle_drawing:
with self._idle_draw_cntx():
self.draw(*args, **kwargs)
draw_if_interactive()
class FigureManagerZInline(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
self.fig_id = "figure_{0}".format(uuid.uuid4().hex)
self._shown = False
def angular_bind(self, **kwargs):
"""
Bind figure data to Zeppelin's Angular Object Registry.
If mpl_config("angular") is True and PY4J is supported, this allows
for the possibility to interactively update a figure from a separate
paragraph without having to display it multiple times.
"""
# This doesn't work for SVG so make sure it's not our format
fmt = kwargs.get('format', mpl_config.get('format'))
if fmt == 'svg':
return
# Get the figure data as a byte array
src = self.canvas.get_bytes(**kwargs)
# Flag to determine whether or not to use
# zeppelin's angular display system
angular = mpl_config.get('angular')
# ZeppelinContext instance (requires PY4J)
context = mpl_config.get('context')
# Finally we must ensure that automatic closing is set to False,
# as otherwise using the angular display system is pointless
close = mpl_config.get('close')
# If above conditions are met, bind the figure data to
# the Angular Object Registry.
if not close and angular:
if hasattr(context, 'angularBind'):
# Binding is performed through figure ID to ensure this works
# if multiple figures are open
context.angularBind(self.fig_id, src)
# Zeppelin will automatically replace this value even if it
# is updated from another pargraph thanks to the {{}} notation
src = "{{%s}}" %self.fig_id
else:
warnings.warn("Cannot bind figure to Angular Object Registry. "
"Check if PY4J is installed.")
return src
def angular_unbind(self):
"""
Unbind figure from angular display system.
"""
context = mpl_config.get('context')
if hasattr(context, 'angularUnbind'):
context.angularUnbind(self.fig_id)
def destroy(self):
"""
Called when close=True or implicitly by pyplot.close().
        Overridden to automatically clean up the angular object registry.
"""
self.angular_unbind()
def show(self, **kwargs):
if not self._shown:
zdisplay(self.canvas.figure, **kwargs)
else:
self.canvas.draw_idle()
self.angular_bind(**kwargs)
self._shown = True
def draw_if_interactive():
"""
If interactive mode is on, this allows for updating properties of
the figure when each new plotting command is called.
"""
manager = Gcf.get_active()
interactive = matplotlib.is_interactive()
angular = mpl_config.get('angular')
# Don't bother continuing if we aren't in interactive mode
# or if there are no active figures. Also pointless to continue
# in angular mode as we don't want to reshow the figure.
if not interactive or angular or manager is None:
return
# Allow for figure to be reshown if close is false since
# this function call implies that it has been updated
if not mpl_config.get('close'):
manager._shown = False
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this (and
# new_figure_manager_given_figure) is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasZInline(figure)
manager = FigureManagerZInline(canvas, num)
return manager
########################################################################
#
# Backend specific functions
#
########################################################################
def zdisplay(fig, **kwargs):
"""
Publishes a matplotlib figure to the notebook paragraph output.
"""
# kwargs can be width or height (in units supported by div tag)
width = kwargs.pop('width', 'auto')
height = kwargs.pop('height', 'auto')
fmt = kwargs.get('format', mpl_config.get('format'))
# Check if format is supported
supported_formats = mpl_config.get('supported_formats')
if fmt not in supported_formats:
raise ValueError("Unsupported format %s" %fmt)
# For SVG the data string has to be unicode, not bytes
if fmt == 'svg':
img = fig.canvas.get_svg(**kwargs)
# This is needed to ensure the SVG image is the correct size.
# We should find a better way to do this...
width = '{}px'.format(mpl_config.get('width'))
height = '{}px'.format(mpl_config.get('height'))
else:
# Express the image as bytes
src = fig.canvas.manager.angular_bind(**kwargs)
img = "<img src={src} style='width={width};height:{height}'>"
img = img.format(src=src, width=width, height=height)
# Print the image to the notebook paragraph via the %html magic
html = "<div style='width:{width};height:{height}'>{img}<div>"
print(html.format(width=width, height=height, img=img))
def displayhook():
"""
Called post paragraph execution if interactive mode is on
"""
if matplotlib.is_interactive():
show()
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
# Create a reference to the show function we are using. This is what actually
# gets called by matplotlib.pyplot.show().
show = Show()
# Default FigureCanvas and FigureManager classes to use from the backend
FigureCanvas = FigureCanvasZInline
FigureManager = FigureManagerZInline
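# A minimal usage sketch (not part of the original backend): what a notebook
# paragraph typically does once matplotlib has been configured to use this
# module as its backend. The plotted data is an assumption; only standard
# pyplot calls are used.
def _example_paragraph_usage():
    import matplotlib.pyplot as plt
    plt.plot([1, 2, 3], [2, 4, 8])
    # plt.show() resolves to the Show() instance above, which prints the
    # figure as %html (or %angular) output for the Zeppelin paragraph.
    plt.show()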
| apache-2.0 |
mwv/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
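# Illustrative follow-up (not part of the original example): inspecting the
# fitted search. The attribute names follow the GridSearchCV API used above;
# only the printout format is an assumption.
print("Best CV score: %0.3f" % grid_search.best_score_)
print("Best parameters: %s" % grid_search.best_params_)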
| bsd-3-clause |
vrv/tensorflow | tensorflow/tools/dist_test/python/census_widendeep.py | 54 | 11900 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
"""Configuration for the census Wide & Deep model.
Returns:
columns: Column names to retrieve from the data source
label_column: Name of the label column
wide_columns: List of wide columns
deep_columns: List of deep columns
categorical_column_names: Names of the categorical columns
continuous_column_names: Names of the continuous columns
"""
# 1. Categorical base columns.
gender = tf.contrib.layers.sparse_column_with_keys(
column_name="gender", keys=["female", "male"])
race = tf.contrib.layers.sparse_column_with_keys(
column_name="race",
keys=["Amer-Indian-Eskimo",
"Asian-Pac-Islander",
"Black",
"Other",
"White"])
education = tf.contrib.layers.sparse_column_with_hash_bucket(
"education", hash_bucket_size=1000)
marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
"marital_status", hash_bucket_size=100)
relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
"relationship", hash_bucket_size=100)
workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
"workclass", hash_bucket_size=100)
occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
"native_country", hash_bucket_size=1000)
# 2. Continuous base columns.
age = tf.contrib.layers.real_valued_column("age")
age_buckets = tf.contrib.layers.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
education_num = tf.contrib.layers.real_valued_column("education_num")
capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")
wide_columns = [
gender, native_country, education, occupation, workclass,
marital_status, relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([native_country, occupation],
hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age_buckets, race, occupation],
hash_bucket_size=int(1e6))]
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(marital_status, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(race, dimension=8),
tf.contrib.layers.embedding_column(native_country, dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age, education_num, capital_gain, capital_loss, hours_per_week]
# Define the column names for the data sets.
columns = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week",
"native_country", "income_bracket"]
label_column = "label"
categorical_columns = ["workclass", "education", "marital_status",
"occupation", "relationship", "race", "gender",
"native_country"]
continuous_columns = ["age", "education_num", "capital_gain",
"capital_loss", "hours_per_week"]
return (columns, label_column, wide_columns, deep_columns,
categorical_columns, continuous_columns)
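# Illustrative sketch (not part of the original script): unpacking the
# configuration tuple documented in census_model_config above. Nothing is
# assumed beyond what that function already returns.
def _example_unpack_model_config():
  (columns, label_column, wide_columns, deep_columns,
   categorical_columns, continuous_columns) = census_model_config()
  return len(wide_columns), len(deep_columns)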
class CensusDataSource(object):
"""Source of census data."""
def __init__(self, data_dir, train_data_url, test_data_url,
columns, label_column,
categorical_columns, continuous_columns):
"""Constructor of CensusDataSource.
Args:
data_dir: Directory to save/load the data files
train_data_url: URL from which the training data can be downloaded
test_data_url: URL from which the test data can be downloaded
columns: Columns to retrieve from the data files (A list of strings)
label_column: Name of the label column
categorical_columns: Names of the categorical columns (A list of strings)
      continuous_columns: Names of the continuous columns (A list of strings)
"""
# Retrieve data from disk (if available) or download from the web.
train_file_path = os.path.join(data_dir, "adult.data")
if os.path.isfile(train_file_path):
print("Loading training data from file: %s" % train_file_path)
train_file = open(train_file_path)
    else:
      urllib.urlretrieve(train_data_url, train_file_path)
      train_file = open(train_file_path)
test_file_path = os.path.join(data_dir, "adult.test")
if os.path.isfile(test_file_path):
print("Loading test data from file: %s" % test_file_path)
test_file = open(test_file_path)
    else:
      urllib.urlretrieve(test_data_url, test_file_path)
      test_file = open(test_file_path)
# Read the training and testing data sets into Pandas DataFrame.
import pandas # pylint: disable=g-import-not-at-top
self._df_train = pandas.read_csv(train_file, names=columns,
skipinitialspace=True)
self._df_test = pandas.read_csv(test_file, names=columns,
skipinitialspace=True, skiprows=1)
# Remove the NaN values in the last rows of the tables
self._df_train = self._df_train[:-1]
self._df_test = self._df_test[:-1]
# Apply the threshold to get the labels.
income_thresh = lambda x: ">50K" in x
self._df_train[label_column] = (
self._df_train["income_bracket"].apply(income_thresh)).astype(int)
self._df_test[label_column] = (
self._df_test["income_bracket"].apply(income_thresh)).astype(int)
self.label_column = label_column
self.categorical_columns = categorical_columns
self.continuous_columns = continuous_columns
def input_train_fn(self):
return self._input_fn(self._df_train)
def input_test_fn(self):
return self._input_fn(self._df_test)
# TODO(cais): Turn into minibatch feeder
def _input_fn(self, df):
"""Input data function.
Creates a dictionary mapping from each continuous feature column name
(k) to the values of that column stored in a constant Tensor.
Args:
df: data feed
Returns:
feature columns and labels
"""
continuous_cols = {k: tf.constant(df[k].values)
for k in self.continuous_columns}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {
k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in self.categorical_columns}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols.items() + categorical_cols.items())
# Converts the label column into a constant Tensor.
label = tf.constant(df[self.label_column].values)
# Returns the feature columns and the label.
return feature_cols, label
def _create_experiment_fn(output_dir): # pylint: disable=unused-argument
"""Experiment creation function."""
(columns, label_column, wide_columns, deep_columns, categorical_columns,
continuous_columns) = census_model_config()
census_data_source = CensusDataSource(FLAGS.data_dir,
TRAIN_DATA_URL, TEST_DATA_URL,
columns, label_column,
categorical_columns,
continuous_columns)
os.environ["TF_CONFIG"] = json.dumps({
"cluster": {
tf.contrib.learn.TaskType.PS: ["fake_ps"] *
FLAGS.num_parameter_servers
},
"task": {
"index": FLAGS.worker_index
}
})
config = run_config.RunConfig(master=FLAGS.master_grpc_url)
estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=FLAGS.model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[5],
config=config)
return tf.contrib.learn.Experiment(
estimator=estimator,
train_input_fn=census_data_source.input_train_fn,
eval_input_fn=census_data_source.input_test_fn,
train_steps=FLAGS.train_steps,
eval_steps=FLAGS.eval_steps
)
def main(unused_argv):
print("Worker index: %d" % FLAGS.worker_index)
learn_runner.run(experiment_fn=_create_experiment_fn,
output_dir=FLAGS.output_dir,
schedule=FLAGS.schedule)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--data_dir",
type=str,
default="/tmp/census-data",
help="Directory for storing the cesnsus data"
)
parser.add_argument(
"--model_dir",
type=str,
default="/tmp/census_wide_and_deep_model",
help="Directory for storing the model"
)
parser.add_argument(
"--output_dir",
type=str,
default="",
help="Base output directory."
)
parser.add_argument(
"--schedule",
type=str,
default="local_run",
help="Schedule to run for this experiment."
)
parser.add_argument(
"--master_grpc_url",
type=str,
default="",
help="URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"
)
parser.add_argument(
"--num_parameter_servers",
type=int,
default=0,
help="Number of parameter servers"
)
parser.add_argument(
"--worker_index",
type=int,
default=0,
help="Worker index (>=0)"
)
parser.add_argument(
"--train_steps",
type=int,
default=1000,
help="Number of training steps"
)
parser.add_argument(
"--eval_steps",
type=int,
default=1,
help="Number of evaluation steps"
)
global FLAGS # pylint:disable=global-at-module-level
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
cactusbin/nyt | matplotlib/lib/matplotlib/hatch.py | 6 | 6997 | """
Contains a classes for generating hatch patterns.
"""
from __future__ import print_function
import numpy as np
from matplotlib.path import Path
class HatchPatternBase:
"""
The base class for a hatch pattern.
"""
pass
class HorizontalHatch(HatchPatternBase):
def __init__(self, hatch, density):
self.num_lines = (hatch.count('-') + hatch.count('+')) * density
self.num_vertices = self.num_lines * 2
def set_vertices_and_codes(self, vertices, codes):
steps, stepsize = np.linspace(0.0, 1.0, self.num_lines, False,
retstep=True)
steps += stepsize / 2.
vertices[0::2, 0] = 0.0
vertices[0::2, 1] = steps
vertices[1::2, 0] = 1.0
vertices[1::2, 1] = steps
codes[0::2] = Path.MOVETO
codes[1::2] = Path.LINETO
class VerticalHatch(HatchPatternBase):
def __init__(self, hatch, density):
self.num_lines = (hatch.count('|') + hatch.count('+')) * density
self.num_vertices = self.num_lines * 2
def set_vertices_and_codes(self, vertices, codes):
steps, stepsize = np.linspace(0.0, 1.0, self.num_lines, False,
retstep=True)
steps += stepsize / 2.
vertices[0::2, 0] = steps
vertices[0::2, 1] = 0.0
vertices[1::2, 0] = steps
vertices[1::2, 1] = 1.0
codes[0::2] = Path.MOVETO
codes[1::2] = Path.LINETO
class NorthEastHatch(HatchPatternBase):
def __init__(self, hatch, density):
self.num_lines = (hatch.count('/') + hatch.count('x') +
hatch.count('X')) * density
if self.num_lines:
self.num_vertices = (self.num_lines + 1) * 2
else:
self.num_vertices = 0
def set_vertices_and_codes(self, vertices, codes):
steps = np.linspace(-0.5, 0.5, self.num_lines + 1, True)
vertices[0::2, 0] = 0.0 + steps
vertices[0::2, 1] = 0.0 - steps
vertices[1::2, 0] = 1.0 + steps
vertices[1::2, 1] = 1.0 - steps
codes[0::2] = Path.MOVETO
codes[1::2] = Path.LINETO
class SouthEastHatch(HatchPatternBase):
def __init__(self, hatch, density):
self.num_lines = (hatch.count('\\') + hatch.count('x') +
hatch.count('X')) * density
if self.num_lines:
self.num_vertices = (self.num_lines + 1) * 2
else:
self.num_vertices = 0
def set_vertices_and_codes(self, vertices, codes):
steps = np.linspace(-0.5, 0.5, self.num_lines + 1, True)
vertices[0::2, 0] = 0.0 + steps
vertices[0::2, 1] = 1.0 + steps
vertices[1::2, 0] = 1.0 + steps
vertices[1::2, 1] = 0.0 + steps
codes[0::2] = Path.MOVETO
codes[1::2] = Path.LINETO
class Shapes(HatchPatternBase):
filled = False
def __init__(self, hatch, density):
if self.num_rows == 0:
self.num_shapes = 0
self.num_vertices = 0
else:
            self.num_shapes = ((self.num_rows // 2 + 1) * (self.num_rows + 1) +
                               (self.num_rows // 2) * (self.num_rows))
self.num_vertices = (self.num_shapes *
len(self.shape_vertices) *
(self.filled and 1 or 2))
def set_vertices_and_codes(self, vertices, codes):
offset = 1.0 / self.num_rows
shape_vertices = self.shape_vertices * offset * self.size
if not self.filled:
inner_vertices = shape_vertices[::-1] * 0.9
shape_codes = self.shape_codes
shape_size = len(shape_vertices)
cursor = 0
        for row in range(self.num_rows + 1):
if row % 2 == 0:
cols = np.linspace(0.0, 1.0, self.num_rows + 1, True)
else:
cols = np.linspace(offset / 2.0, 1.0 - offset / 2.0,
self.num_rows, True)
row_pos = row * offset
for col_pos in cols:
vertices[cursor:cursor + shape_size] = (shape_vertices +
(col_pos, row_pos))
codes[cursor:cursor + shape_size] = shape_codes
cursor += shape_size
if not self.filled:
vertices[cursor:cursor + shape_size] = (inner_vertices +
(col_pos, row_pos))
codes[cursor:cursor + shape_size] = shape_codes
cursor += shape_size
class Circles(Shapes):
def __init__(self, hatch, density):
path = Path.unit_circle()
self.shape_vertices = path.vertices
self.shape_codes = path.codes
Shapes.__init__(self, hatch, density)
class SmallCircles(Circles):
size = 0.2
def __init__(self, hatch, density):
self.num_rows = (hatch.count('o')) * density
Circles.__init__(self, hatch, density)
class LargeCircles(Circles):
size = 0.35
def __init__(self, hatch, density):
self.num_rows = (hatch.count('O')) * density
Circles.__init__(self, hatch, density)
class SmallFilledCircles(SmallCircles):
size = 0.1
filled = True
def __init__(self, hatch, density):
self.num_rows = (hatch.count('.')) * density
Circles.__init__(self, hatch, density)
class Stars(Shapes):
size = 1.0 / 3.0
filled = True
def __init__(self, hatch, density):
self.num_rows = (hatch.count('*')) * density
path = Path.unit_regular_star(5)
self.shape_vertices = path.vertices
self.shape_codes = np.ones(len(self.shape_vertices)) * Path.LINETO
self.shape_codes[0] = Path.MOVETO
Shapes.__init__(self, hatch, density)
_hatch_types = [
HorizontalHatch,
VerticalHatch,
NorthEastHatch,
SouthEastHatch,
SmallCircles,
LargeCircles,
SmallFilledCircles,
Stars
]
def get_path(hatchpattern, density=6):
"""
Given a hatch specifier, *hatchpattern*, generates Path to render
the hatch in a unit square. *density* is the number of lines per
unit square.
"""
density = int(density)
patterns = [hatch_type(hatchpattern, density)
for hatch_type in _hatch_types]
num_vertices = sum([pattern.num_vertices for pattern in patterns])
if num_vertices == 0:
return Path(np.empty((0, 2)))
vertices = np.empty((num_vertices, 2))
codes = np.empty((num_vertices,), np.uint8)
cursor = 0
for pattern in patterns:
if pattern.num_vertices != 0:
vertices_chunk = vertices[cursor:cursor + pattern.num_vertices]
codes_chunk = codes[cursor:cursor + pattern.num_vertices]
pattern.set_vertices_and_codes(vertices_chunk, codes_chunk)
cursor += pattern.num_vertices
return Path(vertices, codes)
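# A minimal illustrative sketch (not part of the original module): rendering
# the unit-square Path returned by get_path with a PathPatch. The figure
# setup and the pattern choice are assumptions made for the example.
def _example_draw_hatch(hatchpattern='x', density=6):
    import matplotlib.pyplot as plt
    from matplotlib.patches import PathPatch
    path = get_path(hatchpattern, density)
    fig, ax = plt.subplots()
    ax.add_patch(PathPatch(path, facecolor='none', edgecolor='black'))
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_aspect('equal')
    return fig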
| unlicense |
abhishek8gupta/sp17-i524 | project/S17-IO-3012/code/bin/benchmark_version_import.py | 19 | 4590 | import matplotlib.pyplot as plt
import sys
import pandas as pd
def get_parm():
"""retrieves mandatory parameter to program
@param: none
@type: n/a
"""
try:
return sys.argv[1]
except:
print ('Must enter file name as parameter')
exit()
def read_file(filename):
"""reads a file into a pandas dataframe
@param: filename The name of the file to read
@type: string
"""
try:
return pd.read_csv(filename)
except:
print ('Error retrieving file')
exit()
def select_data(benchmark_df, mongo_version, config_replicas, mongos_instances, shard_replicas, shards_per_replica):
benchmark_df = benchmark_df[benchmark_df.cloud == "chameleon"]
benchmark_df = benchmark_df[benchmark_df.test_size == "large"]
if mongo_version != 'X':
benchmark_df = benchmark_df[benchmark_df.mongo_version == mongo_version]
if config_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.config_replicas == config_replicas]
if mongos_instances != 'X':
benchmark_df = benchmark_df[benchmark_df.mongos_instances == mongos_instances]
if shard_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.shard_replicas == shard_replicas]
if shards_per_replica != 'X':
benchmark_df = benchmark_df[benchmark_df.shards_per_replica == shards_per_replica]
#benchmark_df1 = benchmark_df.groupby(['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica']).mean()
#http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
benchmark_df = benchmark_df.groupby(['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica'], as_index=False).mean()
#http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
#print benchmark_df1['shard_replicas']
#print benchmark_df1
#print benchmark_df
benchmark_df = benchmark_df.sort_values(by='shard_replicas', ascending=1)
return benchmark_df
def make_figure(import_seconds_32, shards_32, import_seconds_34, shards_34):
"""formats and creates a line chart
    @param1: import_seconds_32 Array with import_seconds from version 3.2
    @type: numpy array
    @param2: shards_32 Array with shards from version 3.2
    @type: numpy array
    @param3: import_seconds_34 Array with import_seconds from version 3.4
    @type: numpy array
    @param4: shards_34 Array with shards from version 3.4
    @type: numpy array
"""
fig = plt.figure()
#plt.title('Average MongoImport Runtime with Various Numbers of Shards')
plt.ylabel('Runtime in Seconds')
plt.xlabel('Number of Shards')
# Make the chart
plt.plot(shards_32, import_seconds_32, label='Version 3.2')
plt.plot(shards_34, import_seconds_34, label='Version 3.4')
#http://stackoverflow.com/questions/11744990/how-to-set-auto-for-upper-limit-but-keep-a-fixed-lower-limit-with-matplotlib
plt.ylim(ymin=0)
plt.legend(loc='best')
# Show the chart (for testing)
# plt.show()
# Save the chart
fig.savefig('../report/version_import.png')
# Run the program by calling the functions
if __name__ == "__main__":
filename = get_parm()
benchmark_df = read_file(filename)
mongo_version = 32
config_replicas = 1
mongos_instances = 1
shard_replicas = 'X'
shards_per_replica = 1
select_df = select_data(benchmark_df, mongo_version, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
#http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
#percentage death=\
import_seconds_32=select_df.as_matrix(columns=[select_df.columns[6]])
shards_32 = select_df.as_matrix(columns=[select_df.columns[3]])
#http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
mongo_version = 34
config_replicas = 1
mongos_instances = 1
shard_replicas = 'X'
shards_per_replica = 1
select_df = select_data(benchmark_df, mongo_version, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
#http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
#percentage death=\
import_seconds_34=select_df.as_matrix(columns=[select_df.columns[6]])
shards_34 = select_df.as_matrix(columns=[select_df.columns[3]])
#http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
make_figure(import_seconds_32, shards_32, import_seconds_34, shards_34)
| apache-2.0 |
fmfn/UnbalancedDataset | imblearn/under_sampling/_prototype_selection/tests/test_allknn.py | 3 | 8774 | """Test the module repeated edited nearest neighbour."""
# Authors: Guillaume Lemaitre <[email protected]>
# Christos Aridas
# License: MIT
import pytest
import numpy as np
from sklearn.utils._testing import assert_allclose, assert_array_equal
from sklearn.neighbors import NearestNeighbors
from sklearn.datasets import make_classification
from imblearn.under_sampling import AllKNN
X = np.array(
[
[-0.12840393, 0.66446571],
[1.32319756, -0.13181616],
[0.04296502, -0.37981873],
[0.83631853, 0.18569783],
[1.02956816, 0.36061601],
[1.12202806, 0.33811558],
[-0.53171468, -0.53735182],
[1.3381556, 0.35956356],
[-0.35946678, 0.72510189],
[1.32326943, 0.28393874],
[2.94290565, -0.13986434],
[0.28294738, -1.00125525],
[0.34218094, -0.58781961],
[-0.88864036, -0.33782387],
[-1.10146139, 0.91782682],
[-0.7969716, -0.50493969],
[0.73489726, 0.43915195],
[0.2096964, -0.61814058],
[-0.28479268, 0.70459548],
[1.84864913, 0.14729596],
[1.59068979, -0.96622933],
[0.73418199, -0.02222847],
[0.50307437, 0.498805],
[0.84929742, 0.41042894],
[0.62649535, 0.46600596],
[0.79270821, -0.41386668],
[1.16606871, -0.25641059],
[1.57356906, 0.30390519],
[1.0304995, -0.16955962],
[1.67314371, 0.19231498],
[0.98382284, 0.37184502],
[0.48921682, -1.38504507],
[-0.46226554, -0.50481004],
[-0.03918551, -0.68540745],
[0.24991051, -1.00864997],
[0.80541964, -0.34465185],
[0.1732627, -1.61323172],
[0.69804044, 0.44810796],
[-0.5506368, -0.42072426],
[-0.34474418, 0.21969797],
]
)
Y = np.array(
[
1,
2,
2,
2,
1,
1,
0,
2,
1,
1,
1,
2,
2,
0,
1,
2,
1,
2,
1,
1,
2,
2,
1,
1,
1,
2,
2,
2,
2,
1,
1,
2,
0,
2,
2,
2,
2,
1,
2,
0,
]
)
R_TOL = 1e-4
def test_allknn_fit_resample():
allknn = AllKNN()
X_resampled, y_resampled = allknn.fit_resample(X, Y)
X_gt = np.array(
[
[-0.53171468, -0.53735182],
[-0.88864036, -0.33782387],
[-0.46226554, -0.50481004],
[-0.34474418, 0.21969797],
[1.02956816, 0.36061601],
[1.12202806, 0.33811558],
[-1.10146139, 0.91782682],
[0.73489726, 0.43915195],
[0.50307437, 0.498805],
[0.84929742, 0.41042894],
[0.62649535, 0.46600596],
[0.98382284, 0.37184502],
[0.69804044, 0.44810796],
[0.04296502, -0.37981873],
[0.28294738, -1.00125525],
[0.34218094, -0.58781961],
[0.2096964, -0.61814058],
[1.59068979, -0.96622933],
[0.73418199, -0.02222847],
[0.79270821, -0.41386668],
[1.16606871, -0.25641059],
[1.0304995, -0.16955962],
[0.48921682, -1.38504507],
[-0.03918551, -0.68540745],
[0.24991051, -1.00864997],
[0.80541964, -0.34465185],
[0.1732627, -1.61323172],
]
)
y_gt = np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
]
)
assert_allclose(X_resampled, X_gt, rtol=R_TOL)
assert_allclose(y_resampled, y_gt, rtol=R_TOL)
def test_all_knn_allow_minority():
X, y = make_classification(
n_samples=10000,
n_features=2,
n_informative=2,
n_redundant=0,
n_repeated=0,
n_classes=3,
n_clusters_per_class=1,
weights=[0.2, 0.3, 0.5],
class_sep=0.4,
random_state=0,
)
allknn = AllKNN(allow_minority=True)
X_res_1, y_res_1 = allknn.fit_resample(X, y)
allknn = AllKNN()
X_res_2, y_res_2 = allknn.fit_resample(X, y)
assert len(y_res_1) < len(y_res_2)
def test_allknn_fit_resample_mode():
allknn = AllKNN(kind_sel="mode")
X_resampled, y_resampled = allknn.fit_resample(X, Y)
X_gt = np.array(
[
[-0.53171468, -0.53735182],
[-0.88864036, -0.33782387],
[-0.46226554, -0.50481004],
[-0.34474418, 0.21969797],
[-0.12840393, 0.66446571],
[1.02956816, 0.36061601],
[1.12202806, 0.33811558],
[-0.35946678, 0.72510189],
[-1.10146139, 0.91782682],
[0.73489726, 0.43915195],
[-0.28479268, 0.70459548],
[0.50307437, 0.498805],
[0.84929742, 0.41042894],
[0.62649535, 0.46600596],
[0.98382284, 0.37184502],
[0.69804044, 0.44810796],
[1.32319756, -0.13181616],
[0.04296502, -0.37981873],
[0.28294738, -1.00125525],
[0.34218094, -0.58781961],
[0.2096964, -0.61814058],
[1.59068979, -0.96622933],
[0.73418199, -0.02222847],
[0.79270821, -0.41386668],
[1.16606871, -0.25641059],
[1.0304995, -0.16955962],
[0.48921682, -1.38504507],
[-0.03918551, -0.68540745],
[0.24991051, -1.00864997],
[0.80541964, -0.34465185],
[0.1732627, -1.61323172],
]
)
y_gt = np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
]
)
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_allknn_fit_resample_with_nn_object():
nn = NearestNeighbors(n_neighbors=4)
allknn = AllKNN(n_neighbors=nn, kind_sel="mode")
X_resampled, y_resampled = allknn.fit_resample(X, Y)
X_gt = np.array(
[
[-0.53171468, -0.53735182],
[-0.88864036, -0.33782387],
[-0.46226554, -0.50481004],
[-0.34474418, 0.21969797],
[-0.12840393, 0.66446571],
[1.02956816, 0.36061601],
[1.12202806, 0.33811558],
[-0.35946678, 0.72510189],
[-1.10146139, 0.91782682],
[0.73489726, 0.43915195],
[-0.28479268, 0.70459548],
[0.50307437, 0.498805],
[0.84929742, 0.41042894],
[0.62649535, 0.46600596],
[0.98382284, 0.37184502],
[0.69804044, 0.44810796],
[1.32319756, -0.13181616],
[0.04296502, -0.37981873],
[0.28294738, -1.00125525],
[0.34218094, -0.58781961],
[0.2096964, -0.61814058],
[1.59068979, -0.96622933],
[0.73418199, -0.02222847],
[0.79270821, -0.41386668],
[1.16606871, -0.25641059],
[1.0304995, -0.16955962],
[0.48921682, -1.38504507],
[-0.03918551, -0.68540745],
[0.24991051, -1.00864997],
[0.80541964, -0.34465185],
[0.1732627, -1.61323172],
]
)
y_gt = np.array(
[
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
]
)
assert_array_equal(X_resampled, X_gt)
assert_array_equal(y_resampled, y_gt)
def test_alknn_not_good_object():
nn = "rnd"
allknn = AllKNN(n_neighbors=nn, kind_sel="mode")
with pytest.raises(ValueError):
allknn.fit_resample(X, Y)
| mit |
JeanKossaifi/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large numbers of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
Specifies the loss function. 'epsilon_insensitive' is the
epsilon-insensitive loss (standard SVR) while
'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equal to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large numbers of samples as LinearSVR does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
dec : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
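# Minimal usage sketch (illustrative only): LinearSVR and OneClassSVM carry no
# doctest examples above, so a small synthetic-data demonstration is appended
# here, guarded so it only runs when this module is executed directly.
if __name__ == "__main__":
    rng = np.random.RandomState(0)

    # Linear support vector regression on random data.
    X_reg = rng.randn(50, 4)
    w_true = np.array([1.0, -2.0, 0.5, 0.0])
    y_reg = np.dot(X_reg, w_true) + 0.1 * rng.randn(50)
    reg = LinearSVR(C=1.0, epsilon=0.1).fit(X_reg, y_reg)
    print("LinearSVR coefficients:", reg.coef_)

    # One-class SVM as a simple novelty detector (+1 = inlier, -1 = outlier).
    X_train = 0.3 * rng.randn(100, 2)
    X_test = np.array([[0.1, -0.2], [4.0, 4.0]])
    detector = OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1).fit(X_train)
    print("OneClassSVM predictions:", detector.predict(X_test))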
| bsd-3-clause |
cojacoo/echoRD_model | echoRD/vG_conv.py | 1 | 10962 | #Van Genuchten Conversions
#(cc) [email protected]
import numpy as np
import pandas as pd
# standard parameters after Carsel & Parrish 1988
carsel=pd.DataFrame(
[[ 'C', 30., 15., 55., 0.068, 0.38, 0.008*100., 1.09, 0.200/360000.],
[ 'CL', 37., 30., 33., 0.095, 0.41, 0.019*100., 1.31, 0.258/360000.],
[ 'L', 40., 40., 20., 0.078, 0.43, 0.036*100., 1.56, 1.042/360000.],
[ 'LS', 13., 81., 6., 0.057, 0.43, 0.124*100., 2.28, 14.592/360000.],
[ 'S', 4., 93., 3., 0.045, 0.43, 0.145*100., 2.68, 29.700/360000.],
[ 'SC', 11., 48., 41., 0.100, 0.38, 0.027*100., 1.23, 0.121/360000.],
[ 'SCL', 19., 54., 27., 0.100, 0.39, 0.059*100., 1.48, 1.308/360000.],
[ 'SI', 85., 6., 9., 0.034, 0.46, 0.016*100., 1.37, 0.250/360000.],
[ 'SIC', 48., 6., 46., 0.070, 0.36, 0.005*100., 1.09, 0.021/360000.],
['SICL', 59., 8., 33., 0.089, 0.43, 0.010*100., 1.23, 0.071/360000.],
[ 'SIL', 65., 17., 18., 0.067, 0.45, 0.020*100., 1.41, 0.450/360000.],
[ 'SL', 26., 63., 11., 0.065, 0.41, 0.075*100., 1.89, 4.421/360000.]],
columns=['Typ','Silt','Sand','Clay','thr','ths','alpha','n','ks'],index=np.arange(12).astype(int)+1)
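# Note on units: the Carsel & Parrish (1988) values appear to be converted here
# from their published units -- alpha from 1/cm to 1/m (factor 100) and
# ks from cm/h to m/s (factor 1/360000); thr and ths are volumetric fractions.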
# conversions
def ku_psi(psi, ks, alpha, n, m=None, l=0.5):
#Calculate unsaturated hydraulic conductivity (ku) from matrix head (psi)
if m is None:
m=1.-1./n
v = 1. + (alpha*np.abs(psi))**n
ku = ks* v**(-1.*m*l) * (1. - (1. - 1/v)**m)**2
return ku
def ku_thst(thst, ks, alpha, n, m=None, l=0.5):
#Calculate unsaturated hydraulic conductivity (ku) from relative saturation (theta*)
if m is None:
m=1.-1./n
ku = ks*thst**l * (1 - (1-thst**(1/m))**m)**2#
return ku
def ku_theta(theta, ths, thr, ks, alpha, n, m=None):
#Calculate unsaturated hydraulic conductivity (ku) from soil moisture (theta)
if m is None:
m=1.-1./n
th_star=thst_theta(theta,ths,thr)
ku = ku_thst(th_star,ks, alpha, n, m)
return ku
def thst_theta(theta,ths,thr):
#Calculate relative saturation (theta*) from soil moisture (theta)
th_star=(theta-thr)/(ths-thr) #
return th_star
def theta_thst(th_star,ths,thr):
#Calculate soil moisture (theta) from relative saturation (theta*)
theta=th_star*(ths-thr)+thr
return theta
def theta_psi(psi,ths,thr,alpha,n,m=None):
#Calculate soil moisture (theta) from matrix head (psi)
if m is None:
m=1.-1./n
theta=theta_thst(thst_psi(psi,alpha,n,m),ths,thr)
return theta
def psi_thst(th_star,alpha,n,m=None):
#Calculate matrix head (psi) from relative saturation (theta*)
if m is None:
m=1.-1./n
psi = -1./alpha * ( (1-th_star**(1/m))/(th_star**(1/m)) )**(1./n)
if (np.iterable(psi)) & any(np.isinf(psi)):
if type(alpha)==float:
psi[np.isinf(psi)]=(-1./alpha * ( (1-0.98**(1/m))/(0.98**(1/m)) )**(1./n))
else:
psi[np.isinf(psi)]=(-1./alpha * ( (1-0.98**(1/m))/(0.98**(1/m)) )**(1./n))[np.isinf(psi)]
return psi
def psi_theta(theta,ths,thr,alpha,n,m=None):
#Calculate matrix head (psi) from soil moisture (theta)
if m is None:
m=1.-1./n
th_star=thst_theta(theta,ths,thr)
psi= -1. * ( (1 - th_star**(1./m)) / (th_star**(1./m)) )**(1./n) / alpha
if (np.iterable(psi)) & any(np.isinf(psi)):
if type(alpha)==float:
psi[np.isinf(psi)]=(-1./alpha * ( (1-0.98**(1/m))/(0.98**(1/m)) )**(1./n))
else:
psi[np.isinf(psi)]=(-1./alpha * ( (1-0.98**(1/m))/(0.98**(1/m)) )**(1./n))[np.isinf(psi)]
return psi
def thst_psi(psi,alpha,n,m=None):
#Calculate relative saturation (theta*) from matrix head (psi)
if m is None:
m=1.-1./n
th_star = (1./(1.+(np.abs(psi)*alpha)**n))**m#
return th_star
def c_psi(psi,ths,thr,alpha,n,m=None):
#Calculate water capacity (c) from matrix head (psi)
if m is None:
m=1.-1./n
c=-1.*(ths-thr)*n*m* alpha**n * np.abs(psi)**(n-1.) * (1+(alpha*np.abs(psi))**n)**(-1.*m - 1.)
#y=m*(1./(1+np.abs(psi*alpha)**n))**(m+1.) * n *(np.abs(psi)*alpha)**(n-1.) *alpha
#c=(ths-thr)*y
return c
def dpsidtheta_thst(th_star,ths,thr,alpha,n,m=None):
#Calculate the derivative dpsi/dtheta via finite differences from relative saturation (theta*)
if m is None:
m=1.-1./n
if (type(th_star)==float) | (type(th_star)==np.float64):
if th_star>0.9899:
th_star=0.9899
if th_star<0.01:
th_star=0.01
else:
th_star[th_star>0.9899]=0.9899
th_star[th_star<0.01]=0.01
th_star1=th_star-0.01
th_star+=0.01
psi = -1./alpha * ( (1-th_star**(1/m))/(th_star**(1/m)) )**(1./n)
psist = -1./alpha * ( (1-th_star1**(1/m))/(th_star1**(1/m)) )**(1./n)
if np.iterable(psi):
psi[np.isinf(psi)]=-1./alpha * ( (1-0.98**(1/m))/(0.98**(1/m)) )**(1./n)
if np.iterable(psist):
psist[np.isinf(psist)]=-1./alpha * ( (1-0.98**(1/m))/(0.98**(1/m)) )**(1./n)
theta=th_star*(ths-thr)+thr
thetast=th_star1*(ths-thr)+thr
dpsidtheta=(psist-psi)/(thetast-theta)
return dpsidtheta
def D_psi(psi,ks,ths,thr,alpha,n,m=None):
#Calculate diffusivity (D) from matrix head (psi)
if m is None:
m=1.-1./n
psix=np.array([psi-0.05,psi,psi+0.05])
if isinstance(ks,np.float):
kus=ku_psi(psix, ks, alpha, n, m)
dth=np.diff(theta_thst(thst_psi(psix,alpha,n,m),ths,thr),axis=0)[0]
else:
kus=ku_psi(psix, ks.repeat(3).reshape(np.shape(psix)), alpha.repeat(3).reshape(np.shape(psix)), n.repeat(3).reshape(np.shape(psix)), m.repeat(3).reshape(np.shape(psix)))
dth=np.diff(theta_thst(thst_psi(psix,alpha.repeat(3).reshape(np.shape(psix)),n.repeat(3).reshape(np.shape(psix)),m.repeat(3).reshape(np.shape(psix))),ths.repeat(3).reshape(np.shape(psix)),thr.repeat(3).reshape(np.shape(psix))),axis=0)[0]
if len(np.shape(kus))==1:
D=kus[1]*0.1/dth
else:
D=kus[1,:]*0.1/dth
return D
def dDdtheta_thst(th_star,ths,thr,ks,alpha,n,m=None):
#Calculate the derivative dD/dtheta via finite differences from relative saturation (theta*)
if m is None:
m=1.-1./n
if (type(th_star)==float) | (type(th_star)==np.float64):
if th_star>0.9899:
th_star=0.9899
if th_star<0.01:
th_star=0.01
else:
th_star[th_star>0.9899]=0.9899
th_star[th_star<0.01]=0.01
th_star1=th_star-0.01
th_star+=0.01
D=D_thst(th_star,ths,thr,ks,alpha,n,m)
Dst=D_thst(th_star1,ths,thr,ks,alpha,n,m)
theta=th_star*(ths-thr)+thr
thetast=th_star1*(ths-thr)+thr
dDdtheta=(Dst-D)/(thetast-theta)
return dDdtheta
def D_theta(theta,ths,thr,ks,alpha,n,m=None):
#Calculate diffusivity (D) from soil moisture (theta)
if m is None:
m=1.-1./n
the=(theta-thr)/(ths-thr)
Dd=(ks*(1-m)*(the**(0.5-(1/m)))) / (alpha*m*(ths-thr)) *( (1-the**(1/m))**(-1*m) + (1-the**(1/m))**m -2 )
return Dd
def D_thst(thst,ths,thr,ks,alpha,n,m=None):
#Calculate diffusivity (D) from relative saturation (theta*)
if m is None:
m=1.-1./n
Dd=(ks*(1.-m)*(thst**(0.5-(1./m)))) / (alpha*m*(ths-thr)) *( (1.-thst**(1./m))**(-1.*m) + (1.-thst**(1./m))**m -2. )
return Dd
def dcst_thst(thst,ths,thr,ks,alpha,n,m=None):
#Calculate diffusivity (D as ku*dpsi/dthst) from relative saturation (theta*)
if m is None:
m=1.-1./n
c=c_psi(psi_thst(thst,alpha,n,m),ths,thr,alpha,n,m)
ku=ku_thst(thst, ks, alpha,n,m)
D=-ku/(c*theta_thst(thst,ths,thr))
return D
# wrapper
def th_psi_f(psi,sample,mc):
if (isinstance(psi,pd.DataFrame)) or (isinstance(psi,pd.Series)):
theta=theta_psi(psi.values, mc.soilmatrix.ts[sample].values, mc.soilmatrix.tr[sample].values, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
else:
theta=theta_psi(psi, mc.soilmatrix.ts[sample].values, mc.soilmatrix.tr[sample].values, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
return theta
def psi_th_f(theta,sample,mc):
if (isinstance(theta,pd.DataFrame)) or (isinstance(theta,pd.Series)):
psi=psi_theta(theta.values, mc.soilmatrix.ts[sample].values, mc.soilmatrix.tr[sample].values, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
else:
psi=psi_theta(theta, mc.soilmatrix.ts[sample].values, mc.soilmatrix.tr[sample].values, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
return psi
def psi_ths_f(thst,sample,mc):
if (isinstance(thst,pd.DataFrame)) or (isinstance(thst,pd.Series)):
psi=psi_thst(thst.values, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
else:
psi=psi_thst(thst, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
return psi
def D_psi_f(psi,sample,mc):
if (isinstance(psi,pd.DataFrame)) or (isinstance(psi,pd.Series)):
D=D_psi(psi.values, mc.soilmatrix.ks[sample].values, mc.soilmatrix.ts[sample].values, mc.soilmatrix.tr[sample].values, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
else:
D=D_psi(psi, mc.soilmatrix.ks[sample].values, mc.soilmatrix.ts[sample].values, mc.soilmatrix.tr[sample].values, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
return D
def D_thst_f(thst,sample,mc):
if (isinstance(thst,pd.DataFrame)) or (isinstance(thst,pd.Series)):
D=D_thst(thst.values, mc.soilmatrix.ts[sample].values, mc.soilmatrix.tr[sample].values, mc.soilmatrix.ks[sample].values, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
else:
D=D_thst(thst, mc.soilmatrix.ts[sample].values, mc.soilmatrix.tr[sample].values, mc.soilmatrix.ks[sample].values, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
return D
def ku_psi_f(psi,sample,mc):
if (isinstance(psi,pd.DataFrame)) or (isinstance(psi,pd.Series)):
ku=ku_psi(psi.values, mc.soilmatrix.ks[sample].values, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
else:
ku=ku_psi(psi, mc.soilmatrix.ks[sample].values, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
return ku
def Dku_thst_f(thst,sample,mc):
if (isinstance(thst,pd.DataFrame)) or (isinstance(thst,pd.Series)):
psi=psi_thst(thst.values,mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
else:
psi=psi_thst(thst,mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
ku=ku_psi(psi, mc.soilmatrix.ks[sample].values, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
D=D_psi(psi, mc.soilmatrix.ks[sample].values, mc.soilmatrix.ts[sample].values, mc.soilmatrix.tr[sample].values, mc.soilmatrix.alpha[sample].values, mc.soilmatrix.n[sample].values)
theta=theta_thst(thst,mc.soilmatrix.ts[sample].values, mc.soilmatrix.tr[sample].values)
return (D, ku, theta)
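# Minimal usage sketch (illustrative only): convert between matrix head, water
# content and unsaturated conductivity for a loam soil, taking the parameters
# from the Carsel & Parrish table above. Runs only when executed directly.
if __name__ == "__main__":
    loam = carsel[carsel.Typ == 'L'].iloc[0]
    psi = np.array([-0.1, -1.0, -10.0])  # matrix head [m]
    theta = theta_psi(psi, loam.ths, loam.thr, loam.alpha, loam.n)
    ku = ku_psi(psi, loam.ks, loam.alpha, loam.n)
    psi_back = psi_theta(theta, loam.ths, loam.thr, loam.alpha, loam.n)
    print('theta(psi) =', theta)
    print('ku(psi)    =', ku)
    print('psi recovered from theta =', psi_back)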
| gpl-3.0 |
anntzer/scikit-learn | asv_benchmarks/benchmarks/linear_model.py | 12 | 7307 | from sklearn.linear_model import (LogisticRegression, Ridge, ElasticNet, Lasso,
LinearRegression, SGDRegressor)
from .common import Benchmark, Estimator, Predictor
from .datasets import (_20newsgroups_highdim_dataset,
_20newsgroups_lowdim_dataset,
_synth_regression_dataset,
_synth_regression_sparse_dataset)
from .utils import make_gen_classif_scorers, make_gen_reg_scorers
class LogisticRegressionBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for LogisticRegression.
"""
param_names = ['representation', 'solver', 'n_jobs']
params = (['dense', 'sparse'], ['lbfgs', 'saga'], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, solver, n_jobs = params
if Benchmark.data_size == 'large':
if representation == 'sparse':
data = _20newsgroups_highdim_dataset(n_samples=10000)
else:
data = _20newsgroups_lowdim_dataset(n_components=1e3)
else:
if representation == 'sparse':
data = _20newsgroups_highdim_dataset(n_samples=2500)
else:
data = _20newsgroups_lowdim_dataset()
return data
def make_estimator(self, params):
representation, solver, n_jobs = params
penalty = 'l2' if solver == 'lbfgs' else 'l1'
estimator = LogisticRegression(solver=solver,
penalty=penalty,
multi_class='multinomial',
tol=0.01,
n_jobs=n_jobs,
random_state=0)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
class RidgeBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for Ridge.
"""
param_names = ['representation', 'solver']
params = (['dense', 'sparse'],
['auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'])
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, solver = params
if representation == 'dense':
data = _synth_regression_dataset(n_samples=500000, n_features=100)
else:
data = _synth_regression_sparse_dataset(n_samples=100000,
n_features=10000,
density=0.005)
return data
def make_estimator(self, params):
representation, solver = params
estimator = Ridge(solver=solver,
fit_intercept=False,
random_state=0)
return estimator
def make_scorers(self):
make_gen_reg_scorers(self)
def skip(self, params):
representation, solver = params
if representation == 'sparse' and solver == 'svd':
return True
return False
class LinearRegressionBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for LinearRegression.
"""
param_names = ['representation']
params = (['dense', 'sparse'],)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, = params
if representation == 'dense':
data = _synth_regression_dataset(n_samples=1000000, n_features=100)
else:
data = _synth_regression_sparse_dataset(n_samples=10000,
n_features=100000,
density=0.01)
return data
def make_estimator(self, params):
estimator = LinearRegression()
return estimator
def make_scorers(self):
make_gen_reg_scorers(self)
class SGDRegressorBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for SGDRegressor.
"""
param_names = ['representation']
params = (['dense', 'sparse'],)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, = params
if representation == 'dense':
data = _synth_regression_dataset(n_samples=100000, n_features=200)
else:
data = _synth_regression_sparse_dataset(n_samples=100000,
n_features=1000,
density=0.01)
return data
def make_estimator(self, params):
estimator = SGDRegressor(max_iter=1000,
tol=1e-16,
random_state=0)
return estimator
def make_scorers(self):
make_gen_reg_scorers(self)
class ElasticNetBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for ElasticNet.
"""
param_names = ['representation', 'precompute']
params = (['dense', 'sparse'], [True, False])
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, precompute = params
if representation == 'dense':
data = _synth_regression_dataset(n_samples=1000000, n_features=100)
else:
data = _synth_regression_sparse_dataset(n_samples=50000,
n_features=5000,
density=0.01)
return data
def make_estimator(self, params):
representation, precompute = params
estimator = ElasticNet(precompute=precompute,
alpha=0.001,
random_state=0)
return estimator
def make_scorers(self):
make_gen_reg_scorers(self)
def skip(self, params):
representation, precompute = params
if representation == 'sparse' and precompute is False:
return True
return False
class LassoBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for Lasso.
"""
param_names = ['representation', 'precompute']
params = (['dense', 'sparse'], [True, False])
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, precompute = params
if representation == 'dense':
data = _synth_regression_dataset(n_samples=1000000, n_features=100)
else:
data = _synth_regression_sparse_dataset(n_samples=50000,
n_features=5000,
density=0.01)
return data
def make_estimator(self, params):
representation, precompute = params
estimator = Lasso(precompute=precompute,
alpha=0.001,
random_state=0)
return estimator
def make_scorers(self):
make_gen_reg_scorers(self)
def skip(self, params):
representation, precompute = params
if representation == 'sparse' and precompute is False:
return True
return False
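# These classes are collected by airspeed velocity (asv); a typical invocation
# from the repository root is `asv run` (see the asv documentation for how to
# select individual benchmarks). The guarded sketch below only illustrates,
# outside the asv harness, the estimator configuration used by LassoBenchmark
# on small synthetic data.
if __name__ == "__main__":
    from sklearn.datasets import make_regression

    X, y = make_regression(n_samples=1000, n_features=50, noise=0.1,
                           random_state=0)
    lasso = Lasso(precompute=True, alpha=0.001, random_state=0).fit(X, y)
    print("non-zero coefficients:", (lasso.coef_ != 0).sum())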
| bsd-3-clause |
abhisg/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
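# Minimal usage sketch (illustrative only, guarded so it never runs on import):
# estimate a covariance matrix empirically and with Ledoit-Wolf shrinkage.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.multivariate_normal(mean=[0, 0], cov=[[1.0, 0.3], [0.3, 0.5]],
                                size=500)
    print("empirical covariance:\n", empirical_covariance(X))
    lw = LedoitWolf().fit(X)
    print("Ledoit-Wolf covariance:\n", lw.covariance_)
    print("shrinkage:", lw.shrinkage_)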
| bsd-3-clause |
geopandas/geopandas | geopandas/tools/overlay.py | 1 | 13915 | import warnings
from functools import reduce
import numpy as np
import pandas as pd
from geopandas import GeoDataFrame, GeoSeries
from geopandas.array import _check_crs, _crs_mismatch_warn
def _ensure_geometry_column(df):
"""
Helper function to ensure the geometry column is called 'geometry'.
If another column with that name exists, it will be dropped.
"""
if not df._geometry_column_name == "geometry":
if "geometry" in df.columns:
df.drop("geometry", axis=1, inplace=True)
df.rename(
columns={df._geometry_column_name: "geometry"}, copy=False, inplace=True
)
df.set_geometry("geometry", inplace=True)
def _overlay_intersection(df1, df2):
"""
Overlay Intersection operation used in overlay function
"""
# Spatial Index to create intersections
idx1, idx2 = df2.sindex.query_bulk(df1.geometry, predicate="intersects", sort=True)
# Create pairs of geometries in both dataframes to be intersected
if idx1.size > 0 and idx2.size > 0:
left = df1.geometry.take(idx1)
left.reset_index(drop=True, inplace=True)
right = df2.geometry.take(idx2)
right.reset_index(drop=True, inplace=True)
intersections = left.intersection(right)
poly_ix = intersections.type.isin(["Polygon", "MultiPolygon"])
intersections.loc[poly_ix] = intersections[poly_ix].buffer(0)
# only keep actual intersecting geometries
pairs_intersect = pd.DataFrame({"__idx1": idx1, "__idx2": idx2})
geom_intersect = intersections
# merge data for intersecting geometries
df1 = df1.reset_index(drop=True)
df2 = df2.reset_index(drop=True)
dfinter = pairs_intersect.merge(
df1.drop(df1._geometry_column_name, axis=1),
left_on="__idx1",
right_index=True,
)
dfinter = dfinter.merge(
df2.drop(df2._geometry_column_name, axis=1),
left_on="__idx2",
right_index=True,
suffixes=("_1", "_2"),
)
return GeoDataFrame(dfinter, geometry=geom_intersect, crs=df1.crs)
else:
return GeoDataFrame(
[],
columns=list(set(df1.columns).union(df2.columns)) + ["__idx1", "__idx2"],
crs=df1.crs,
)
def _overlay_difference(df1, df2):
"""
Overlay Difference operation used in overlay function
"""
# spatial index query to find intersections
idx1, idx2 = df2.sindex.query_bulk(df1.geometry, predicate="intersects", sort=True)
idx1_unique, idx1_unique_indices = np.unique(idx1, return_index=True)
idx2_split = np.split(idx2, idx1_unique_indices[1:])
sidx = [
idx2_split.pop(0) if idx in idx1_unique else []
for idx in range(df1.geometry.size)
]
# Create differences
new_g = []
for geom, neighbours in zip(df1.geometry, sidx):
new = reduce(
lambda x, y: x.difference(y), [geom] + list(df2.geometry.iloc[neighbours])
)
new_g.append(new)
differences = GeoSeries(new_g, index=df1.index, crs=df1.crs)
poly_ix = differences.type.isin(["Polygon", "MultiPolygon"])
differences.loc[poly_ix] = differences[poly_ix].buffer(0)
geom_diff = differences[~differences.is_empty].copy()
dfdiff = df1[~differences.is_empty].copy()
dfdiff[dfdiff._geometry_column_name] = geom_diff
return dfdiff
def _overlay_symmetric_diff(df1, df2):
"""
Overlay Symmetric Difference operation used in overlay function
"""
dfdiff1 = _overlay_difference(df1, df2)
dfdiff2 = _overlay_difference(df2, df1)
dfdiff1["__idx1"] = range(len(dfdiff1))
dfdiff2["__idx2"] = range(len(dfdiff2))
dfdiff1["__idx2"] = np.nan
dfdiff2["__idx1"] = np.nan
# ensure geometry name (otherwise merge goes wrong)
_ensure_geometry_column(dfdiff1)
_ensure_geometry_column(dfdiff2)
# combine both 'difference' dataframes
dfsym = dfdiff1.merge(
dfdiff2, on=["__idx1", "__idx2"], how="outer", suffixes=("_1", "_2")
)
geometry = dfsym.geometry_1.copy()
geometry.name = "geometry"
# https://github.com/pandas-dev/pandas/issues/26468 use loc for now
geometry.loc[dfsym.geometry_1.isnull()] = dfsym.loc[
dfsym.geometry_1.isnull(), "geometry_2"
]
dfsym.drop(["geometry_1", "geometry_2"], axis=1, inplace=True)
dfsym.reset_index(drop=True, inplace=True)
dfsym = GeoDataFrame(dfsym, geometry=geometry, crs=df1.crs)
return dfsym
def _overlay_union(df1, df2):
"""
Overlay Union operation used in overlay function
"""
dfinter = _overlay_intersection(df1, df2)
dfsym = _overlay_symmetric_diff(df1, df2)
dfunion = pd.concat([dfinter, dfsym], ignore_index=True, sort=False)
# keep geometry column last
columns = list(dfunion.columns)
columns.remove("geometry")
columns = columns + ["geometry"]
return dfunion.reindex(columns=columns)
def overlay(df1, df2, how="intersection", keep_geom_type=None, make_valid=True):
"""Perform spatial overlay between two GeoDataFrames.
Currently only supports data GeoDataFrames with uniform geometry types,
i.e. containing only (Multi)Polygons, or only (Multi)Points, or a
combination of (Multi)LineString and LinearRing shapes.
Implements several methods that are all effectively subsets of the union.
See the User Guide page :doc:`../../user_guide/set_operations` for details.
Parameters
----------
df1 : GeoDataFrame
df2 : GeoDataFrame
how : string
Method of spatial overlay: 'intersection', 'union',
'identity', 'symmetric_difference' or 'difference'.
keep_geom_type : bool
If True, return only geometries of the same geometry type as df1 has,
if False, return all resulting geometries. Default is None,
which will set keep_geom_type to True but warn upon dropping
geometries.
make_valid : bool, default True
If True, any invalid input geometries are corrected with a call to `buffer(0)`,
if False, a `ValueError` is raised if any input geometries are invalid.
Returns
-------
df : GeoDataFrame
GeoDataFrame with new set of polygons and attributes
resulting from the overlay
Examples
--------
>>> from shapely.geometry import Polygon
>>> polys1 = geopandas.GeoSeries([Polygon([(0,0), (2,0), (2,2), (0,2)]),
... Polygon([(2,2), (4,2), (4,4), (2,4)])])
>>> polys2 = geopandas.GeoSeries([Polygon([(1,1), (3,1), (3,3), (1,3)]),
... Polygon([(3,3), (5,3), (5,5), (3,5)])])
>>> df1 = geopandas.GeoDataFrame({'geometry': polys1, 'df1_data':[1,2]})
>>> df2 = geopandas.GeoDataFrame({'geometry': polys2, 'df2_data':[1,2]})
>>> geopandas.overlay(df1, df2, how='union')
df1_data df2_data geometry
0 1.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....
1 2.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....
2 2.0 2.0 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....
3 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....
4 2.0 NaN MULTIPOLYGON (((4.00000 3.00000, 4.00000 2.000...
5 NaN 1.0 MULTIPOLYGON (((3.00000 2.00000, 3.00000 1.000...
6 NaN 2.0 POLYGON ((3.00000 5.00000, 5.00000 5.00000, 5....
>>> geopandas.overlay(df1, df2, how='intersection')
df1_data df2_data geometry
0 1 1 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....
1 2 1 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....
2 2 2 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....
>>> geopandas.overlay(df1, df2, how='symmetric_difference')
df1_data df2_data geometry
0 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....
1 2.0 NaN MULTIPOLYGON (((4.00000 3.00000, 4.00000 2.000...
2 NaN 1.0 MULTIPOLYGON (((3.00000 2.00000, 3.00000 1.000...
3 NaN 2.0 POLYGON ((3.00000 5.00000, 5.00000 5.00000, 5....
>>> geopandas.overlay(df1, df2, how='difference')
geometry df1_data
0 POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0.... 1
1 MULTIPOLYGON (((3.00000 3.00000, 4.00000 3.000... 2
>>> geopandas.overlay(df1, df2, how='identity')
df1_data df2_data geometry
0 1.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 1.00000, 1....
1 2.0 1.0 POLYGON ((2.00000 2.00000, 2.00000 3.00000, 3....
2 2.0 2.0 POLYGON ((4.00000 4.00000, 4.00000 3.00000, 3....
3 1.0 NaN POLYGON ((2.00000 0.00000, 0.00000 0.00000, 0....
4 2.0 NaN MULTIPOLYGON (((4.00000 3.00000, 4.00000 2.000...
See also
--------
sjoin : spatial join
Notes
------
Every operation in GeoPandas is planar, i.e. the potential third
dimension is not taken into account.
"""
# Allowed operations
allowed_hows = [
"intersection",
"union",
"identity",
"symmetric_difference",
"difference", # aka erase
]
# Error Messages
if how not in allowed_hows:
raise ValueError(
"`how` was '{0}' but is expected to be in {1}".format(how, allowed_hows)
)
if isinstance(df1, GeoSeries) or isinstance(df2, GeoSeries):
raise NotImplementedError(
"overlay currently only implemented for " "GeoDataFrames"
)
if not _check_crs(df1, df2):
_crs_mismatch_warn(df1, df2, stacklevel=3)
if keep_geom_type is None:
keep_geom_type = True
keep_geom_type_warning = True
else:
keep_geom_type_warning = False
polys = ["Polygon", "MultiPolygon"]
lines = ["LineString", "MultiLineString", "LinearRing"]
points = ["Point", "MultiPoint"]
for i, df in enumerate([df1, df2]):
poly_check = df.geom_type.isin(polys).any()
lines_check = df.geom_type.isin(lines).any()
points_check = df.geom_type.isin(points).any()
if sum([poly_check, lines_check, points_check]) > 1:
raise NotImplementedError(
"df{} contains mixed geometry types.".format(i + 1)
)
box_gdf1 = df1.total_bounds
box_gdf2 = df2.total_bounds
if not (
((box_gdf1[0] <= box_gdf2[2]) and (box_gdf2[0] <= box_gdf1[2]))
and ((box_gdf1[1] <= box_gdf2[3]) and (box_gdf2[1] <= box_gdf1[3]))
):
return GeoDataFrame(
[],
columns=list(
set(
df1.drop(df1.geometry.name, axis=1).columns.to_list()
+ df2.drop(df2.geometry.name, axis=1).columns.to_list()
)
)
+ ["geometry"],
)
# Computations
def _make_valid(df):
df = df.copy()
if df.geom_type.isin(polys).all():
mask = ~df.geometry.is_valid
col = df._geometry_column_name
if make_valid:
df.loc[mask, col] = df.loc[mask, col].buffer(0)
elif mask.any():
raise ValueError(
"You have passed make_valid=False along with "
f"{mask.sum()} invalid input geometries. "
"Use make_valid=True or make sure that all geometries "
"are valid before using overlay."
)
return df
df1 = _make_valid(df1)
df2 = _make_valid(df2)
    with warnings.catch_warnings():  # CRS checked above, suppress array-level warning
warnings.filterwarnings("ignore", message="CRS mismatch between the CRS")
if how == "difference":
return _overlay_difference(df1, df2)
elif how == "intersection":
result = _overlay_intersection(df1, df2)
elif how == "symmetric_difference":
result = _overlay_symmetric_diff(df1, df2)
elif how == "union":
result = _overlay_union(df1, df2)
elif how == "identity":
dfunion = _overlay_union(df1, df2)
result = dfunion[dfunion["__idx1"].notnull()].copy()
if keep_geom_type:
key_order = result.keys()
exploded = result.reset_index(drop=True).explode()
exploded = exploded.reset_index(level=0)
orig_num_geoms = result.shape[0]
geom_type = df1.geom_type.iloc[0]
if geom_type in polys:
exploded = exploded.loc[exploded.geom_type.isin(polys)]
elif geom_type in lines:
exploded = exploded.loc[exploded.geom_type.isin(lines)]
elif geom_type in points:
exploded = exploded.loc[exploded.geom_type.isin(points)]
else:
raise TypeError("`keep_geom_type` does not support {}.".format(geom_type))
# level_0 created with above reset_index operation
# and represents the original geometry collections
result = exploded.dissolve(by="level_0")[key_order]
if (result.shape[0] != orig_num_geoms) and keep_geom_type_warning:
num_dropped = orig_num_geoms - result.shape[0]
warnings.warn(
"`keep_geom_type=True` in overlay resulted in {} dropped "
"geometries of different geometry types than df1 has. "
"Set `keep_geom_type=False` to retain all "
"geometries".format(num_dropped),
UserWarning,
stacklevel=2,
)
result.reset_index(drop=True, inplace=True)
result.drop(["__idx1", "__idx2"], axis=1, inplace=True)
return result
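# Minimal usage sketch (hypothetical frames, not part of the library itself):
# intersect two overlapping squares; the default keep_geom_type handling keeps
# only geometries of the same type as df1 and warns if anything is dropped.
if __name__ == "__main__":
    from shapely.geometry import Polygon

    _a = GeoDataFrame({"a": [1]},
                      geometry=[Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])])
    _b = GeoDataFrame({"b": [1]},
                      geometry=[Polygon([(1, 1), (3, 1), (3, 3), (1, 3)])])
    print(overlay(_a, _b, how="intersection"))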
| bsd-3-clause |
saiwing-yeung/log-tunes | Utilities/analyze-log.py | 1 | 2983 | import numpy as np
import pandas as pd
import csv
get_ipython().magic('matplotlib inline')
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
input_file = '~/Documents/itunes-log.csv'
logs = pd.read_csv(input_file, quotechar = '"', escapechar = "\\", parse_dates = [0])
print("\nMost frequently played media:")
print(pd.value_counts(logs["name"]).head(10))
print("\nMost frequently played artists:")
print(pd.value_counts(logs["artist"]).head(10))
logs["play_hour"] = pd.DatetimeIndex(logs['date']).hour
plt.figure(figsize=(15, 7))
ax = plt.subplot(111)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
plt.xticks(range(0, 24), fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel("Hour", fontsize=16)
plt.ylabel("Frequency", fontsize=16)
plt.hist(logs['play_hour'], bins=range(0, 25))
plt.savefig('count-by-hour.png')
import seaborn as sns
sns.set()
sns.color_palette("hls", 8)
# construct a DataFrame that represents the percentage of plays of each genre by hour
logs_hXg = logs.groupby(['play_hour', 'genre']).agg({'genre': 'count'})
logs_hXg_long = logs_hXg.groupby(level=0).apply(lambda x: 100*x/float(x.sum()))
# turn this DataFrame into a wide format
logs_hXg_wide = logs_hXg_long.unstack('genre').fillna(0).reindex(range(24), fill_value=0).transpose()
# sort logs_hXg_wide by the total number of plays per genre
genre_by_count_sorted = logs['genre'].value_counts()
genre_count = genre_by_count_sorted.loc[np.sort(genre_by_count_sorted.index.values)]
# sort logs_hXg_wide so that higher ranked genres appear on top, in the same order as the legend
logs_hXg_wide['rank'] = (genre_count.values.argsort()[::-1]).argsort()
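# (The double argsort above converts play counts into descending ranks: rank 0
#  marks the most-played genre, aligned with the alphabetically ordered rows of
#  logs_hXg_wide.)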
logs_hXg_wide.sort_values('rank', inplace=True, ascending=False)
logs_hXg_wide.drop(['rank'], axis=1, inplace=True)
# create the data to be used for plotting
mat_plot = logs_hXg_wide.values
idx = np.arange(24)
# prepare the plot
fig = plt.figure(figsize=(15, 7))
ax = plt.subplot(111)
ax.margins(0, 0)
plt.xticks(range(0, 24), fontsize=14)
plt.yticks(fontsize=14)
plt.title('Distribution of genre by hour of day', fontsize=16)
plt.xlabel("Hour of day", fontsize=16)
plt.ylabel("Percent (%)", fontsize=16)
# set up color scheme and plot the figure
color_scheme = sns.color_palette("cubehelix", mat_plot.shape[0])
sp = ax.stackplot(idx, mat_plot, edgecolor='white', colors=color_scheme)
# legend
num_in_legend = 5 # number of genres to show in the legend
proxy = list(reversed( [ matplotlib.patches.Rectangle((0, 0), 0, 0, facecolor=pol.get_facecolor()[0])
for pol in sp ] ))
ax.legend(proxy[:num_in_legend], genre_by_count_sorted.index[:num_in_legend],
title="Top %d genres" % num_in_legend, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
# need to specify bbox otherwise legend would be clipped in the saved figure
plt.savefig('genre-by-hour.png', bbox_inches='tight')
plt.show()
| gpl-3.0 |
andrewnc/scikit-learn | examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py | 252 | 3490 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
correspond to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
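# (PHIinv(.975) is about 1.96, the two-sided 95% normal quantile; the contour
# levels below are given directly as probabilities 0.025, 0.5 and 0.975, so k
# itself is not used further.)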
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
| bsd-3-clause |
plowman/python-mcparseface | models/syntaxnet/tensorflow/tensorflow/contrib/learn/python/learn/estimators/estimator.py | 2 | 20385 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
import tempfile
import time
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import losses
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.graph_actions import evaluate
from tensorflow.contrib.learn.python.learn.graph_actions import infer
from tensorflow.contrib.learn.python.learn.graph_actions import train
from tensorflow.contrib.learn.python.learn.io import data_feeder
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import device_setter
from tensorflow.python.training import saver
# Default metrics for evaluation.
_EVAL_METRICS = {
'regression': {
'mean_squared_error': losses.sum_of_squares,
},
'classification': {
'logistic': losses.sigmoid_cross_entropy,
},}
class ModeKeys(object):
"""Standard names for model modes.
The following standard keys are defined:
* `TRAIN`: training mode.
* `EVAL`: evaluation mode.
* `INFER`: inference mode.
"""
TRAIN = 'train'
EVAL = 'eval'
INFER = 'infer'
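# Illustrative sketch of a ``model_fn`` that branches on these mode strings
# (hypothetical helpers; a real model supplies its own network and loss):
#
#   def my_model_fn(features, targets, mode):
#     predictions = my_network(features)
#     loss = None
#     if mode in (ModeKeys.TRAIN, ModeKeys.EVAL):
#       loss = my_loss(predictions, targets)
#     return predictions, loss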
def _get_input_fn(x, y, batch_size):
# TODO(ipoloshukin): Remove this when refactor of data_feeder is done
if hasattr(x, 'create_graph') and hasattr(y, 'create_graph'):
def input_fn():
return x.create_graph(), y.create_graph()
return input_fn, None
df = data_feeder.setup_train_data_feeder(x, y,
n_classes=None,
batch_size=batch_size)
return df.input_builder, df.get_feed_dict_fn()
def _get_predict_input_fn(x, batch_size):
# TODO(ipoloshukin): Remove this when refactor of data_feeder is done
if hasattr(x, 'create_graph'):
def input_fn():
return x.create_graph()
return input_fn, None
df = data_feeder.setup_train_data_feeder(x, None,
n_classes=None,
batch_size=batch_size)
return df.input_builder, df.get_feed_dict_fn()
class BaseEstimator(sklearn.BaseEstimator):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
  Concrete implementations of this class should provide the following functions:
* _get_train_ops
* _get_eval_ops
* _get_predict_ops
It may override _get_default_metric_functions.
`Estimator` implemented below is a good example of how to use this class.
Parameters:
model_dir: Directory to save model parameters, graph and etc.
"""
__metaclass__ = abc.ABCMeta
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None):
# Model directory.
self._model_dir = model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.info('Using temporary folder as model directory: %s',
self._model_dir)
# Create a run configuration
self._config = BaseEstimator._Config()
# Set device function depending if there are replicas or not.
if self._config.num_ps_replicas > 0:
ps_ops = ['Variable', 'AutoReloadVariable']
self._device_fn = device_setter.replica_device_setter(
ps_tasks=self._config.num_ps_replicas,
merge_devices=False, ps_ops=ps_ops)
else:
self._device_fn = None
    # Features and targets TensorSignature objects.
self._features_info = None
self._targets_info = None
  @abc.abstractmethod
def _get_train_ops(self, features, targets):
"""Method that builds model graph and returns trainer ops.
    Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
targets: `Tensor` or `dict` of `Tensor` objects.
Returns:
Tuple of train `Operation` and loss `Tensor`.
"""
pass
  @abc.abstractmethod
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
predictions: `Tensor` or `dict` of `Tensor` objects.
"""
pass
def _get_eval_ops(self, features, targets, metrics):
"""Method that builds model graph and returns evaluation ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
targets: `Tensor` or `dict` of `Tensor` objects.
metrics: `dict` of functions that take predictions and targets.
Returns:
metrics: `dict` of `Tensor` objects.
"""
predictions = self._get_predict_ops(features)
result = {}
for name, metric in six.iteritems(metrics):
result[name] = metric(predictions, targets)
return result
def _get_feature_ops_from_example(self, examples_batch):
"""Method that returns features given the batch of examples.
This method will be used to export model into a server.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
"""
raise NotImplementedError('_get_feature_ops_from_example not implemented '
'in BaseEstimator')
def _get_default_metric_functions(self):
"""Method that provides default metric operations.
    This function is intended to be overridden by sub-classes.
Returns:
`dict` of functions that take predictions and targets `Tensor` objects and
return `Tensor`.
"""
return {}
def fit(self, x, y, steps, batch_size=32, monitor=None):
"""Trains a model given training data X and y.
Args:
x: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression).
steps: number of steps to train model for.
batch_size: minibatch size to use on the input, defaults to 32.
monitor: monitor object to print training progress and invoke
early stopping.
Returns:
Returns self.
"""
input_fn, feed_fn = _get_input_fn(x, y, batch_size)
return self._train_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
monitor=monitor)
def train(self, input_fn, steps, monitor=None):
"""Trains a model given input builder function.
Args:
input_fn: Input builder function, returns tuple of dicts or
dict and Tensor.
steps: number of steps to train model for.
monitor: monitor object to print training progress and invoke
early stopping.
Returns:
Returns self.
"""
return self._train_model(input_fn=input_fn, steps=steps, monitor=monitor)
def partial_fit(self, x, y, steps=1, batch_size=32, monitor=None):
"""Incremental fit on a batch of samples.
    This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. It can be used to
    implement iterative or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at once, or when the model takes a long time to
    converge and you want to split training into subparts.
Args:
x: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class label in classification, real numbers in regression).
steps: number of steps to train model for.
batch_size: minibatch size to use on the input, defaults to 32.
monitor: Monitor object to print training progress and invoke
early stopping.
Returns:
Returns self.
"""
input_fn, feed_fn = _get_input_fn(x, y, batch_size)
return self._train_model(input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
monitor=monitor)
def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
batch_size=32, steps=100, metrics=None):
"""Evaluates given model with provided evaluation data.
Args:
x: features.
y: targets.
input_fn: Input function. If set, x and y must be None.
feed_fn: Function creating a feed dict every time it is called. Called
once per iteration.
batch_size: minibatch size to use on the input, defaults to 32. Ignored
if input_fn is set.
      steps: Number of steps to evaluate for.
metrics: Dict of metric ops to run.
Returns:
Returns self.
Raises:
ValueError: If x or y are not None while input_fn or feed_fn is not None.
"""
if (x is not None or y is not None) and input_fn is not None:
raise ValueError('Either x and y or input_fn must be None.')
if input_fn is None:
assert x is not None
input_fn, feed_fn = _get_input_fn(x, y, batch_size)
return self._evaluate_model(input_fn=input_fn, feed_fn=feed_fn,
steps=steps, metrics=metrics)
def predict(self, x, axis=None, batch_size=None):
"""Returns predictions for given features.
Args:
x: features.
axis: Axis on which to argmax. (for classification).
batch_size: Override default batch size.
Returns:
Numpy array of predicted classes or regression values.
"""
return self._infer_model(x=x, batch_size=batch_size, axis=axis)
def predict_proba(self, x, batch_size=None):
"""Returns prediction probabilities for given features (classification).
Args:
x: features.
      batch_size: Override default batch size.
Returns:
Numpy array of predicted probabilities.
"""
return self._infer_model(x=x, batch_size=batch_size, proba=True)
def _check_inputs(self, features, targets):
if self._features_info is not None:
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
if self._targets_info is not None:
if not tensor_signature.tensors_compatible(targets, self._targets_info):
raise ValueError('Targets are incompatible with given information. '
'Given targets: %s, required signatures: %s.' %
(str(targets), str(self._targets_info)))
else:
self._targets_info = tensor_signature.create_signatures(targets)
def _train_model(self,
input_fn,
steps,
feed_fn=None,
device_fn=None,
monitor=None,
log_every_steps=100,
fail_on_nan_loss=True):
if self._config.execution_mode not in ('all', 'train'):
return
# Stagger startup of worker sessions based on task id.
sleep_secs = min(self._config.training_worker_max_startup_secs,
self._config.task *
self._config.training_worker_session_startup_stagger_secs)
if sleep_secs:
logging.info('Waiting %d secs before starting task %d.', sleep_secs,
self._config.task)
time.sleep(sleep_secs)
# Device allocation
device_fn = device_fn or self._device_fn
with ops.Graph().as_default() as g, g.device(device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, targets = input_fn()
self._check_inputs(features, targets)
train_op, loss_op = self._get_train_ops(features, targets)
return train(
graph=g,
output_dir=self._model_dir,
train_op=train_op,
loss_op=loss_op,
global_step_tensor=global_step,
log_every_steps=log_every_steps,
supervisor_is_chief=(self._config.task == 0),
supervisor_master=self._config.master,
feed_fn=feed_fn,
max_steps=steps,
fail_on_nan_loss=fail_on_nan_loss)
def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None):
if self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset'):
return
checkpoint_path = saver.latest_checkpoint(self._model_dir)
eval_dir = os.path.join(self._model_dir, 'eval')
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, targets = input_fn()
self._check_inputs(features, targets)
eval_dict = self._get_eval_ops(features, targets, metrics or
self._get_default_metric_functions())
eval_results, _ = evaluate(
graph=g,
output_dir=eval_dir,
checkpoint_path=checkpoint_path,
eval_dict=eval_dict,
global_step_tensor=global_step,
supervisor_master=self._config.master,
feed_fn=feed_fn,
max_steps=steps)
return eval_results
def _infer_model(self, x, batch_size=None, axis=None, proba=False):
# Converts inputs into tf.DataFrame / tf.Series.
batch_size = -1 if batch_size is None else batch_size
input_fn, feed_fn = _get_predict_input_fn(x, batch_size)
checkpoint_path = saver.latest_checkpoint(self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features, _ = input_fn()
feed_dict = feed_fn() if feed_fn is not None else None
predictions = self._get_predict_ops(features)
if not isinstance(predictions, dict):
predictions = {'predictions': predictions}
# TODO(ipolosukhin): Support batching
return infer(checkpoint_path, predictions, feed_dict=feed_dict)
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
Parameters:
model_fn: Model function, takes features and targets tensors or dicts of
tensors and returns predictions and loss tensors.
E.g. `(features, targets) -> (predictions, loss)`.
model_dir: Directory to save model parameters, graph and etc.
classification: boolean, true if classification problem.
learning_rate: learning rate for the model.
optimizer: optimizer for the model, can be:
      string: name of optimizer, like 'SGD', 'Adam', 'Adagrad', 'Ftrl',
        'Momentum', 'RMSProp').
Full list in contrib/layers/optimizers.py
class: sub-class of Optimizer
(like tf.train.GradientDescentOptimizer).
clip_gradients: clip_norm value for call to `clip_by_global_norm`. None
denotes no gradient clipping.
"""
def __init__(self,
model_fn=None,
model_dir=None,
classification=True,
learning_rate=0.01,
optimizer='SGD',
clip_gradients=None):
super(Estimator, self).__init__(model_dir=model_dir)
self._model_fn = model_fn
self._classification = classification
if isinstance(optimizer, six.string_types):
if optimizer not in layers.OPTIMIZER_CLS_NAMES:
raise ValueError(
'Optimizer name should be one of [%s], you provided %s.' %
(', '.join(layers.OPTIMIZER_CLS_NAMES), optimizer))
self.optimizer = optimizer
self.learning_rate = learning_rate
self.clip_gradients = clip_gradients
def _get_train_ops(self, features, targets):
"""Method that builds model graph and returns trainer ops.
Expected to be overriden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
targets: `Tensor` or `dict` of `Tensor` objects.
Returns:
Tuple of train `Operation` and loss `Tensor`.
"""
_, loss = self._model_fn(features, targets, ModeKeys.TRAIN)
train_op = layers.optimize_loss(
loss,
contrib_framework.get_global_step(),
learning_rate=self.learning_rate,
optimizer=self.optimizer,
clip_gradients=self.clip_gradients)
return train_op, loss
def _get_eval_ops(self, features, targets, metrics):
"""Method that builds model graph and returns evaluation ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
targets: `Tensor` or `dict` of `Tensor` objects.
metrics: `dict` of functions that take predictions and targets.
Returns:
metrics: `dict` of `Tensor` objects.
"""
predictions, loss = self._model_fn(features, targets, ModeKeys.EVAL)
result = {'loss': loss}
if isinstance(targets, dict) and len(targets) == 1:
# Unpack single target into just tensor.
targets = targets[targets.keys()[0]]
for name, metric in six.iteritems(metrics):
# TODO(ipolosukhin): Add support for multi-head metrics.
result[name] = metric(predictions, targets)
return result
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
    Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
predictions: `Tensor` or `dict` of `Tensor` objects.
"""
targets = tensor_signature.create_placeholders_from_signatures(
self._targets_info)
predictions, _ = self._model_fn(features, targets, ModeKeys.INFER)
return predictions
def _get_default_metric_functions(self):
"""Method that provides default metric operations.
Returns:
a dictionary of metric operations.
"""
return _EVAL_METRICS[
'classification' if self._classification else 'regression']
def _get_feature_ops_from_example(self, examples_batch):
"""Unimplemented.
TODO(vihanjain): We need a way to parse tf.Example into features.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
Exception: Unimplemented
"""
raise NotImplementedError('_get_feature_ops_from_example not yet '
'implemented')
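# Minimal usage sketch for ``Estimator`` (hypothetical data and model; not part
# of this module's public examples):
#
#   def linear_model(features, targets, mode):
#     predictions = layers.fully_connected(features, num_outputs=1)
#     loss = losses.sum_of_squares(predictions, targets)
#     return predictions, loss
#
#   estimator = Estimator(model_fn=linear_model, classification=False)
#   estimator.fit(x_train, y_train, steps=1000)
#   y_pred = estimator.predict(x_test)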
| apache-2.0 |
liyu1990/sklearn | sklearn/preprocessing/label.py | 16 | 26702 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
        if len(diff):
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
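# Worked example for _inverse_binarize_multiclass (hypothetical input): with
# classes = [1, 2, 3] and dense scores y = [[0.2, 0.5, 0.3], [0.9, 0.0, 0.1]],
# the row-wise argmax selects columns 1 and 0, so the function returns [2, 1].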
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
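    # Worked illustration for _transform (hypothetical input): with
    # y = [{"a", "b"}, {"b"}] and class_mapping = {"a": 0, "b": 1}, the arrays
    # built above are indices == [0, 1, 1] (set order may vary),
    # indptr == [0, 2, 3] and data == [1, 1, 1], i.e. the CSR encoding of
    # [[1, 1], [0, 1]].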
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
teonlamont/mne-python | examples/simulation/plot_simulate_raw_data.py | 6 | 2822 | """
===========================
Generate simulated raw data
===========================
This example generates raw data by repeating a desired source
activation multiple times.
"""
# Authors: Yousra Bekhti <[email protected]>
# Mark Wronkiewicz <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import read_source_spaces, find_events, Epochs, compute_covariance
from mne.datasets import sample
from mne.simulation import simulate_sparse_stc, simulate_raw
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
trans_fname = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
src_fname = data_path + '/subjects/sample/bem/sample-oct-6-src.fif'
bem_fname = (data_path +
'/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif')
# Load real data as the template
raw = mne.io.read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True)
raw = raw.crop(0., 30.) # 30 sec is enough
##############################################################################
# Generate dipole time series
n_dipoles = 4 # number of dipoles to create
epoch_duration = 2. # duration of each epoch/event
n = 0 # harmonic number
def data_fun(times):
"""Generate time-staggered sinusoids at harmonics of 10Hz"""
global n
n_samp = len(times)
window = np.zeros(n_samp)
start, stop = [int(ii * float(n_samp) / (2 * n_dipoles))
for ii in (2 * n, 2 * n + 1)]
window[start:stop] = 1.
n += 1
data = 25e-9 * np.sin(2. * np.pi * 10. * n * times)
data *= window
return data
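# (Each call of data_fun handles one dipole: the module-level counter ``n``
#  selects a non-overlapping window within the epoch and a sinusoid at the next
#  harmonic of 10 Hz, so the four sources fire one after another.)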
times = raw.times[:int(raw.info['sfreq'] * epoch_duration)]
src = read_source_spaces(src_fname)
stc = simulate_sparse_stc(src, n_dipoles=n_dipoles, times=times,
data_fun=data_fun, random_state=0)
# look at our source data
fig, ax = plt.subplots(1)
ax.plot(times, 1e9 * stc.data.T)
ax.set(ylabel='Amplitude (nAm)', xlabel='Time (sec)')
mne.viz.utils.plt_show()
##############################################################################
# Simulate raw data
raw_sim = simulate_raw(raw, stc, trans_fname, src, bem_fname, cov='simple',
iir_filter=[0.2, -0.2, 0.04], ecg=True, blink=True,
n_jobs=1, verbose=True)
raw_sim.plot()
##############################################################################
# Plot evoked data
events = find_events(raw_sim) # only 1 pos, so event number == 1
epochs = Epochs(raw_sim, events, 1, -0.2, epoch_duration)
cov = compute_covariance(epochs, tmax=0., method='empirical',
verbose='error') # quick calc
evoked = epochs.average()
evoked.plot_white(cov, time_unit='s')
| bsd-3-clause |
casimp/pyxe | bin/williams.py | 3 | 1067 | import numpy as np
import matplotlib.pyplot as plt
def sigma_xx(K, r, theta):
sigma = (K / (2 * np.pi * r / 1000) ** 0.5) * np.cos(theta / 2) * (
1 - np.sin(theta / 2) * np.sin(3 * theta / 2))
return sigma
def sigma_yy(K, r, theta):
sigma = (K / (2 * np.pi * r / 1000) ** 0.5) * np.cos(theta / 2) * (
1 + np.sin(theta / 2) * np.sin(3 * theta / 2))
return sigma
def sigma_xy(K, r, theta):
sigma = (K / (2 * np.pi * r / 1000) ** 0.5) * np.cos(theta / 2) * np.sin(
theta / 2) * np.sin(3 * theta / 2)
sigma[theta < 0] = -sigma[theta < 0]
return sigma
def cart2pol(x, y):
r = np.sqrt(x**2 + y**2)
theta = np.arctan2(y, x)
return r, theta
if __name__ == "__main__":
K_ = 20 * 10**6
x_ = np.linspace(-0.75, 1.25, 201)
y_ = np.linspace(-1, 1, 201)
X, Y = np.meshgrid(x_, y_)
r_, theta_ = cart2pol(X, Y)
    sig_xy = sigma_xy(K_, r_, theta_)
plt.contourf(X, Y, sig_xy, 25)
    plt.contour(X, Y, sig_xy, 25, colors='k', linewidths=0.5)
plt.show()
| mit |
lifuhuang/critic | hotels/baseline.py | 1 | 2023 | # -*- coding: utf-8 -*-
"""
Created on Thu May 5 14:49:42 2016
@author: lifu
"""
import sys
######
sys.path.append('/home/lifu/PySpace/legonet')
######
import os.path as op
import tensorflow as tf
import numpy as np
import pandas as pd
from legonet import optimizers
from legonet.layers import FullyConnected, Input, Embedding, Sequential, Parallel
from legonet.models import NeuralNetwork
data_path = '/mnt/shared/hotels/df1'
params_path = '/home/lifu/PySpace/critic/hotels/params.npz'
if __name__ == '__main__':
model = NeuralNetwork(optimizer=optimizers.Adam(), log_dir='logs')
model.add(Input('input', 200))
model.add(FullyConnected('hidden1', 512, 'relu'))
model.add(FullyConnected('hidden2', 256, 'relu'))
model.add(FullyConnected('hidden3', 128, 'relu'))
    model.add(FullyConnected('output', 3))  # one unit per class: negative / neutral / positive
model.build()
print 'Model constructed!'
try:
model.load_checkpoint('./checkpoints/')
print 'checkpoint loaded!'
except Exception as e:
print 'File not found!'
df = pd.read_pickle(data_path)
split = df.shape[0] // 10 * 9
target = df['ratings.overall']
df['negative'] = (target <= 3).astype(int)
df['neutral'] = ((target > 3) & (target < 5)).astype(int)
df['positive'] = (target == 5).astype(int)
df_train = df.iloc[:split, :]
X_train = np.vstack(df_train.loc[:, 'vector'])
Y_train = df_train.loc[:, ['negative', 'neutral', 'positive']].values
df_dev = df.iloc[split:, :]
X_dev = np.vstack(df_dev.loc[:, 'vector'])
Y_dev = df_dev.loc[:, ['negative', 'neutral', 'positive']].values
model.fit(X_train, Y_train, n_epochs=2, batch_size=128,
loss_decay=0.9, checkpoint_dir='./checkpoints')
n_test = 30
text = df_dev.iloc[:n_test]['text']
yh = model.predict(X_dev[:n_test])
for i in xrange(n_test):
print '%d:' % i
print 'Y_true', Y_dev[i]
print 'Y_pred', yh[i]
        print text.iat[i]
| gpl-3.0 |
avmarchenko/exa | exa/core/numerical.py | 2 | 15937 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2018, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Data Objects
###################################
Data objects are used to store typed data coming from an external source (for
example a file on disk). There are three primary data objects provided by
this module, :class:`~exa.core.numerical.Series`, :class:`~exa.core.numerical.DataFrame`,
and :class:`~exa.core.numerical.Field`. The purpose of these objects is to facilitate
conversion of data into "traits" used in visualization and enforce relationships
between data objects in a given container. Any of the objects provided by this
module may be extended.
"""
import warnings
import numpy as np
import pandas as pd
from exa.core.error import RequiredColumnError
class Numerical(object):
"""
Base class for :class:`~exa.core.numerical.Series`,
:class:`~exa.core.numerical.DataFrame`, and :class:`~exa.numerical.Field`
objects, providing default trait functionality and clean representations
when present as part of containers.
"""
def slice_naive(self, key):
"""
Slice a data object based on its index, either by value (.loc) or
position (.iloc).
Args:
key: Single index value, slice, tuple, or list of indices/positionals
Returns:
data: Slice of self
"""
cls = self.__class__
key = check_key(self, key)
return cls(self.loc[key])
def __repr__(self):
name = self.__class__.__name__
return '{0}{1}'.format(name, self.shape)
def __str__(self):
return self.__repr__()
class BaseSeries(Numerical):
"""
Base class for dense and sparse series objects (labeled arrays).
Attributes:
_sname (str): May have a required name (default None)
_iname (str: May have a required index name
_stype (type): May have a required value type
_itype (type): May have a required index type
"""
_metadata = ['name', 'meta']
# These attributes may be set when subclassing Series
_sname = None # Series may have a required name
_iname = None # Series may have a required index name
_stype = None # Series may have a required value type
_itype = None # Series may have a required index type
def __init__(self, *args, **kwargs):
meta = kwargs.pop('meta', None)
super(BaseSeries, self).__init__(*args, **kwargs)
if self._sname is not None and self.name != self._sname:
if self.name is not None:
warnings.warn("Object's name changed")
self.name = self._sname
if self._iname is not None and self.index.name != self._iname:
if self.index.name is not None:
warnings.warn("Object's index name changed")
self.index.name = self._iname
self.meta = meta
class BaseDataFrame(Numerical):
"""
Base class for dense and sparse dataframe objects (labeled matrices).
Note:
If the _cardinal attribute is populated, it will automatically be added
to the _categories and _columns attributes.
Attributes:
_cardinal (tuple): Tuple of column name and raw type that acts as foreign key to index of another table
_index (str): Name of index (may be used as foreign key in another table)
_columns (list): Required columns
_categories (dict): Dict of column names, raw types that if present will be converted to and from categoricals automatically
"""
_metadata = ['name', 'meta']
_cardinal = None # Tuple of column name and raw type that acts as foreign key to index of another table
_index = None # Name of index (may be used as foreign key in another table)
_columns = [] # Required columns
_categories = {} # Dict of column names, raw types that if present will be converted to and from categoricals automatically
def cardinal_groupby(self):
"""
Group this object on it cardinal dimension (_cardinal).
Returns:
grpby: Pandas groupby object (grouped on _cardinal)
"""
g, t = self._cardinal
self[g] = self[g].astype(t)
grpby = self.groupby(g)
self[g] = self[g].astype('category')
return grpby
def slice_cardinal(self, key):
"""
Get the slice of this object by the value or values of the cardinal
dimension.
"""
cls = self.__class__
key = check_key(self, key, cardinal=True)
return cls(self[self[self._cardinal[0]].isin(key)])
def __init__(self, *args, **kwargs):
meta = kwargs.pop('meta', None)
super(BaseDataFrame, self).__init__(*args, **kwargs)
self.meta = meta
class Series(BaseSeries, pd.Series):
"""
A labeled array.
.. code-block:: Python
class MySeries(exa.core.numerical.Series):
_sname = 'data' # series default name
_iname = 'data_index' # series default index name
seri = MySeries(np.random.rand(10**5))
"""
@property
def _constructor(self):
return Series
def copy(self, *args, **kwargs):
"""
Make a copy of this object.
See Also:
For arguments and description of behavior see `pandas docs`_.
.. _pandas docs: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.copy.html
"""
cls = self.__class__ # Note that type conversion does not perform copy
return cls(pd.Series(self).copy(*args, **kwargs))
class DataFrame(BaseDataFrame, pd.DataFrame):
"""
A data table
.. code-block:: Python
class MyDF(exa.core.numerical.DataFrame):
_cardinal = ('cardinal', int)
_index = 'mydf_index'
_columns = ['x', 'y', 'z', 'symbol']
_categories = {'symbol': str}
"""
_constructor_sliced = Series
@property
def _constructor(self):
return DataFrame
def copy(self, *args, **kwargs):
"""
Make a copy of this object.
See Also:
For arguments and description of behavior see `pandas docs`_.
.. _pandas docs: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.copy.html
"""
cls = self.__class__ # Note that type conversion does not perform copy
return cls(pd.DataFrame(self).copy(*args, **kwargs))
def _revert_categories(self):
"""
Inplace conversion to categories.
"""
for column, dtype in self._categories.items():
if column in self.columns:
self[column] = self[column].astype(dtype)
def _set_categories(self):
"""
Inplace conversion from categories.
"""
for column, _ in self._categories.items():
if column in self.columns:
self[column] = self[column].astype('category')
def __init__(self, *args, **kwargs):
super(DataFrame, self).__init__(*args, **kwargs)
if self._cardinal is not None:
self._categories[self._cardinal[0]] = self._cardinal[1]
self._columns.append(self._cardinal[0])
self._set_categories()
if len(self) > 0:
name = self.__class__.__name__
if self._columns:
missing = set(self._columns).difference(self.columns)
if missing:
raise RequiredColumnError(missing, name)
if self.index.name != self._index and self._index is not None:
                index_name = self.index.name.decode('utf-8') if isinstance(self.index.name, bytes) else self.index.name
                if index_name is not None and index_name != self._index:
                    warnings.warn("Object's index name changed from {} to {}".format(index_name, self._index))
self.index.name = self._index
class Field(DataFrame):
"""
A field is defined by field data and field values. Field data defines the
discretization of the field (i.e. its origin in a given space, number of
steps/step spaceing, and endpoint for example). Field values can be scalar
(series) and/or vector (dataframe) data defining the magnitude and/or direction
at each given point.
Note:
The convention for generating the discrete field data and ordering of
the field values must be the same (e.g. discrete field points are
generated x, y, then z and scalar field values are a series object
ordered looping first over x then y, then z).
In addition to the :class:`~exa.core.numerical.DataFrame` attributes, this object
has the following:
"""
@property
def _constructor(self):
return Field
def copy(self, *args, **kwargs):
"""
Make a copy of this object.
Note:
Copies both field data and field values.
See Also:
For arguments and description of behavior see `pandas docs`_.
.. _pandas docs: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.copy.html
"""
cls = self.__class__ # Note that type conversion does not perform copy
data = pd.DataFrame(self).copy(*args, **kwargs)
values = [field.copy() for field in self.field_values]
return cls(data, field_values=values)
def memory_usage(self):
"""
Get the combined memory usage of the field data and field values.
"""
data = super(Field, self).memory_usage()
values = 0
for value in self.field_values:
values += value.memory_usage()
data['field_values'] = values
return data
def slice_naive(self, key):
"""
Naively (on index) slice the field data and values.
Args:
key: Int, slice, or iterable to select data and values
Returns:
field: Sliced field object
"""
cls = self.__class__
key = check_key(self, key)
enum = pd.Series(range(len(self)))
enum.index = self.index
values = self.field_values[enum[key].values]
data = self.loc[key]
return cls(data, field_values=values)
#def slice_cardinal(self, key):
# cls = self.__class__
# grpby = self.cardinal_groupby()
def __init__(self, *args, **kwargs):
# The following check allows creation of a single field (whose field data
# comes from a series object and field values from another series object).
field_values = kwargs.pop("field_values", None)
if args and isinstance(args[0], pd.Series):
args = (args[0].to_frame().T, )
super(Field, self).__init__(*args, **kwargs)
self._metadata = ['field_values']
if isinstance(field_values, (list, tuple, np.ndarray)):
self.field_values = [Series(v) for v in field_values] # Convert type for nice repr
elif field_values is None:
self.field_values = []
elif isinstance(field_values, pd.Series):
self.field_values = [Series(field_values)]
else:
raise TypeError("Wrong type for field_values with type {}".format(type(field_values)))
for i in range(len(self.field_values)):
self.field_values[i].name = i
class Field3D(Field):
"""
Dataframe for storing dimensions of a scalar or vector field of 3D space.
+-------------------+----------+-------------------------------------------+
| Column | Type | Description |
+===================+==========+===========================================+
| nx | int | number of grid points in x |
+-------------------+----------+-------------------------------------------+
| ny | int | number of grid points in y |
+-------------------+----------+-------------------------------------------+
| nz | int | number of grid points in z |
+-------------------+----------+-------------------------------------------+
| ox | float | field origin point in x |
+-------------------+----------+-------------------------------------------+
| oy | float | field origin point in y |
+-------------------+----------+-------------------------------------------+
| oz | float | field origin point in z |
+-------------------+----------+-------------------------------------------+
| xi | float | First component in x |
+-------------------+----------+-------------------------------------------+
| xj | float | Second component in x |
+-------------------+----------+-------------------------------------------+
| xk | float | Third component in x |
+-------------------+----------+-------------------------------------------+
| yi | float | First component in y |
+-------------------+----------+-------------------------------------------+
| yj | float | Second component in y |
+-------------------+----------+-------------------------------------------+
| yk | float | Third component in y |
+-------------------+----------+-------------------------------------------+
| zi | float | First component in z |
+-------------------+----------+-------------------------------------------+
| zj | float | Second component in z |
+-------------------+----------+-------------------------------------------+
| zk | float | Third component in z |
+-------------------+----------+-------------------------------------------+
Note:
Each field should be flattened into an N x 1 (scalar) or N x 3 (vector)
series or dataframe respectively. The orientation of the flattening
should have x as the outer loop and z values as the inner loop (for both
cases). This is sometimes called C-major or C-style order, and has
the last index changing the fastest and the first index changing the
slowest.
See Also:
:class:`~exa.core.numerical.Field`
"""
_columns = ['nx', 'ny', 'nz', 'ox', 'oy', 'oz', 'xi', 'xj', 'xk',
'yi', 'yj', 'yk', 'zi', 'zj', 'zk']
@property
def _constructor(self):
return Field3D
def check_key(data_object, key, cardinal=False):
"""
Update the value of an index key by matching values or getting positionals.
"""
itype = (int, np.int32, np.int64)
if not isinstance(key, itype + (slice, tuple, list, np.ndarray)):
raise KeyError("Unknown key type {} for key {}".format(type(key), key))
keys = data_object.index.values
if cardinal and data_object._cardinal is not None:
keys = data_object[data_object._cardinal[0]].unique()
elif isinstance(key, itype) and key in keys:
key = list(sorted(data_object.index.values[key]))
elif isinstance(key, itype) and key < 0:
key = list(sorted(data_object.index.values[key]))
elif isinstance(key, itype):
key = [key]
elif isinstance(key, slice):
key = list(sorted(data_object.index.values[key]))
    elif isinstance(key, (tuple, list, pd.Index)) and not all(k in keys for k in key):
        # built-in all() is used here because np.all() does not evaluate generator expressions
key = list(sorted(data_object.index.values[key]))
return key
class SparseDataFrame(BaseDataFrame, pd.SparseDataFrame):
@property
def _constructor(self):
return SparseDataFrame
| apache-2.0 |
sa2812/udacity | ud120/validation/validate_poi.py | 1 | 1096 | #!/usr/bin/python
"""
Starter code for the validation mini-project.
The first step toward building your POI identifier!
Start by loading/formatting the data
After that, it's not our code anymore--it's yours!
"""
import pickle
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
from sklearn.tree import DecisionTreeClassifier
from sklearn.cross_validation import train_test_split
data_dict = pickle.load(open("../final_project/final_project_dataset.pkl", "r") )
### first element is our labels, any added elements are predictor
### features. Keep this the same for the mini-project, but you'll
### have a different feature list when you do the final project.
features_list = ["poi", "salary"]
data = featureFormat(data_dict, features_list)
labels, features = targetFeatureSplit(data)
labels_train, labels_test, features_train, features_test = train_test_split(labels, features, test_size=0.3, random_state=42)
### it's all yours from here forward!
clf = DecisionTreeClassifier()
clf.fit(features_train, labels_train)
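# One possible next step (not part of the starter code): evaluate the classifier
# on the held-out split, e.g.
#   from sklearn.metrics import accuracy_score
#   print accuracy_score(labels_test, clf.predict(features_test))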
| mit |
materialsproject/MPContribs | mpcontribs-io/mpcontribs/io/core/utils.py | 1 | 5005 | # -*- coding: utf-8 -*-
"""module defines utility methods for MPContribs core.io library"""
from __future__ import unicode_literals
from decimal import Decimal, DecimalException, InvalidOperation
import six
from mpcontribs.io.core import mp_id_pattern
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
def get_short_object_id(cid):
"""return shortened contribution ID (ObjectId) for `cid`.
>>> get_short_object_id('5a8638add4f144413451852a')
'451852a'
>>> get_short_object_id('5a8638add4f1400000000000')
'5a8638a'
"""
length = 7
cid_short = str(cid)[-length:]
if cid_short == "0" * length:
cid_short = str(cid)[:length]
return cid_short
def make_pair(key, value, sep=":"):
"""return string for `key`-`value` pair with separator `sep`.
>>> make_pair('Phase', 'Hollandite')
u'Phase: Hollandite'
>>> print make_pair('ΔH', '0.066 eV/mol', sep=';')
ΔH; 0.066 eV/mol
>>> make_pair('k', 2.3)
u'k: 2.3'
"""
if not isinstance(value, six.string_types):
value = str(value)
return "{} ".format(sep).join([key, value])
def nest_dict(dct, keys):
# """nest `dct` under list of `keys`.
# >>> print nest_dict({'key': {'subkey': 'value'}}, ['a', 'b'])
# RecursiveDict([('a', RecursiveDict([('b', RecursiveDict([('key', RecursiveDict([('subkey', u'value')]))]))]))])
# """
from mpcontribs.io.core.recdict import RecursiveDict
nested_dict = dct
# nested_dict = RecursiveDict(dct)
# nested_dict.rec_update()
for key in reversed(keys):
nested_dict = RecursiveDict({key: nested_dict})
return nested_dict
def get_composition_from_string(comp_str):
"""validate and return composition from string `comp_str`."""
from pymatgen.core import Composition, Element
comp = Composition(comp_str)
for element in comp.elements:
Element(element)
formula = comp.get_integer_formula_and_factor()[0]
comp = Composition(formula)
return "".join(
[
"{}{}".format(key, int(value) if value > 1 else "")
for key, value in comp.as_dict().items()
]
)
def normalize_root_level(title):
"""convert root-level title into conventional identifier; non-identifiers
become part of shared (meta-)data. Returns: (is_general, title)"""
from pymatgen.core.composition import CompositionError
try:
composition = get_composition_from_string(title)
return False, composition
except (CompositionError, KeyError, TypeError, ValueError):
if mp_id_pattern.match(title.lower()):
return False, title.lower()
return True, title
def clean_value(value, unit="", convert_to_percent=False, max_dgts=3):
"""return clean value with maximum digits and optional unit and percent"""
dgts = max_dgts
value = str(value) if not isinstance(value, six.string_types) else value
try:
value = Decimal(value)
dgts = len(value.as_tuple().digits)
dgts = max_dgts if dgts > max_dgts else dgts
except DecimalException:
return value
if convert_to_percent:
value = Decimal(value) * Decimal("100")
unit = "%"
val = "{{:.{}g}}".format(dgts).format(value)
if unit:
val += " {}".format(unit)
return val
def strip_converter(text):
"""http://stackoverflow.com/questions/13385860"""
try:
text = text.strip()
if not text:
return ""
val = clean_value(text, max_dgts=6)
return str(Decimal(val))
except InvalidOperation:
return text
def read_csv(body, is_data_section=True, **kwargs):
"""run pandas.read_csv on (sub)section body"""
csv_comment_char = "#"
import pandas
body = body.strip()
if not body:
return None
from mpcontribs.io.core.components.tdata import Table
if is_data_section:
cur_line = 1
while 1:
body_split = body.split("\n", cur_line)
first_line = body_split[cur_line - 1].strip()
cur_line += 1
if first_line and not first_line.startswith(csv_comment_char):
break
sep = kwargs.get("sep", ",")
options = {"sep": sep, "header": 0}
header = [col.strip() for col in first_line.split(sep)]
body = "\n".join([sep.join(header), body_split[1]])
if first_line.startswith("level_"):
options.update({"index_col": [0, 1]})
ncols = len(header)
else:
options = {"sep": ":", "header": None, "index_col": 0}
ncols = 2
options.update(**kwargs)
converters = dict((col, strip_converter) for col in range(ncols))
return Table(
pandas.read_csv(
StringIO(body),
comment=csv_comment_char,
skipinitialspace=True,
squeeze=True,
converters=converters,
encoding="utf8",
**options
).dropna(how="all")
)
| mit |
drabastomek/practicalDataAnalysisCookbook | Codes/Chapter09/nlp_countWords.py | 1 | 1849 | import nltk
import re
import numpy as np
import matplotlib.pyplot as plt
def preprocess_data(text):
global sentences, tokenized
tokenizer = nltk.RegexpTokenizer(r'\w+')
sentences = nltk.sent_tokenize(text)
tokenized = [tokenizer.tokenize(s) for s in sentences]
# import the data
guns_laws = '../../Data/Chapter09/ST_gunLaws.txt'
with open(guns_laws, 'r') as f:
article = f.read()
# chunk into sentences and tokenize
sentences = []
tokenized = []
preprocess_data(article)
# part-of-speech tagging
tagged_sentences = [nltk.pos_tag(w) for w in tokenized]
# extract names entities -- regular expressions approach
tagged = []
pattern = '''
ENT: {<DT>?(<NNP|NNPS>)+}
'''
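# The grammar above chunks an optional determiner followed by one or more proper
# nouns (NNP/NNPS) into a single 'ENT' subtree, which is later kept together as a
# named entity when building the word list.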
tokenizer = nltk.RegexpParser(pattern)
for sent in tagged_sentences:
tagged.append(tokenizer.parse(sent))
# keep named entities together
words = []
lemmatizer = nltk.WordNetLemmatizer()
for sentence in tagged:
for pos in sentence:
if type(pos) == nltk.tree.Tree:
words.append(' '.join([w[0] for w in pos]))
else:
words.append(lemmatizer.lemmatize(pos[0]))
# remove stopwords
stopwords = nltk.corpus.stopwords.words('english')
words = [w for w in words if w.lower() not in stopwords]
# and calculate frequencies
freq = nltk.FreqDist(words)
# sort descending on frequency
f = sorted(freq.items(), key=lambda x: x[1], reverse=True)
# print top words
top_words = [w for w in f if w[1] > 1]
print(top_words, len(top_words))
# plot 10 top words
top_words_transposed = list(zip(*top_words))
y_pos = np.arange(len(top_words_transposed[0][:10]))[::-1]
plt.barh(y_pos, top_words_transposed[1][:10],
align='center', alpha=0.5)
plt.yticks(y_pos, top_words_transposed[0][:10])
plt.xlabel('Frequency')
plt.ylabel('Top words')
plt.savefig('../../Data/Chapter09/charts/word_frequency.png',
            dpi=300)
| gpl-2.0 |
mattcoley/oxbridge-sentiment | oxbridgesentiment/sentiment/views.py | 2 | 1267 | from django.shortcuts import render
from django.http import HttpResponse
from django.template import loader
from .models import TweetGroup
from datetime import datetime, timedelta
from django.utils import timezone
import pandas as pd
import scrape
import json
from django.db.models import Max
def index(request):
return render(request, 'sentiment/index.html', {})
def update(request,name):
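    # Re-use the most recent cached TweetGroup if it is newer than ~100 minutes;
    # otherwise scrape Twitter again, score the tweets and store a fresh entry.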
entry = TweetGroup.objects.filter(name = name).aggregate(Max('date_added'))
now = timezone.now()
if entry['date_added__max'] == None or entry['date_added__max'] < (now - timedelta(minutes=100)):
(positive,negative,neutral,best_text,worst_text,total_score) = scrape.likeability(name)
t = TweetGroup()
t.name = name
t.positive = positive
t.negative = negative
t.neutral = neutral
t.date_added = now
t.best_text = best_text
t.worst_text = worst_text
t.total_score = total_score
t.save()
t = TweetGroup.objects.filter(name = name).order_by('-date_added')[0]
return HttpResponse(json.dumps({'name':t.name,'positive':t.positive,'negative':t.negative,'neutral':t.neutral,'best-text':t.best_text,'worst-text':t.worst_text,'total-score':round(float(t.total_score),4)}))
| mit |
process-asl/process-asl | procasl/datasets.py | 2 | 9982 | import os
import glob
import warnings
import numpy as np
from sklearn.datasets.base import Bunch
from nilearn.datasets.utils import _fetch_files, _get_dataset_dir
from ._utils import _get_dataset_descr
def _single_glob(pattern):
"""Returns the file matching a given pattern. An error is raised if
no file/multiple files match the pattern
Parameters
----------
pattern : str
The pattern to match.
Returns
-------
    output : str
        The single filename matching the pattern.
"""
filenames = glob.glob(pattern)
if not filenames:
        raise ValueError('Non-existent file with pattern {0}'.format(pattern))
if len(filenames) > 1:
        raise ValueError('Non-unique file with pattern {0}'.format(pattern))
return filenames[0]
def load_heroes_dataset(
subjects=None,
subjects_parent_directory='/volatile/asl_data/heroes/raw',
paths_patterns={'anat': 't1mri/acquisition1/anat*.nii',
'basal ASL': 'fMRI/acquisition1/basal_rawASL*.nii',
'basal CBF': 'B1map/acquisition1/basal_relCBF*.nii'}
):
"""Loads the NeuroSpin HEROES dataset.
Parameters
----------
subjects : sequence of int or None, optional
ids of subjects to load, default to loading all subjects.
subjects_parent_directory : str, optional
Path to the dataset folder containing all subjects folders.
paths_patterns : dict, optional
Input dictionary. Keys are the names of the images to load, values
are strings specifying the unique relative pattern specifying the
path to these images within each subject directory.
Returns
-------
dataset : dict
The absolute paths to the images for all subjects. Keys are the same
as the files_patterns keys, values are lists of strings.
"""
# Absolute paths of subjects folders
subjects_directories = [os.path.join(subjects_parent_directory, name)
for name in
sorted(os.listdir(subjects_parent_directory))
if os.path.isdir(os.path.join(
subjects_parent_directory, name))]
max_subjects = len(subjects_directories)
if subjects is None:
subjects = range(max_subjects)
else:
if max(subjects) > max_subjects:
raise ValueError('Got {0} subjects, you provided ids {1}'
''.format(max_subjects, str(subjects)))
subjects_directories = [subjects_directories[subject_id] for subject_id in
subjects]
# Build the path list for each image type
dataset = {}
for (image_type, file_pattern) in paths_patterns.iteritems():
dataset[image_type] = []
for subject_dir in subjects_directories:
dataset[image_type].append(
_single_glob(os.path.join(subject_dir, file_pattern)))
return dataset
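# Example usage (the paths below are placeholders for a local copy of the data):
#   dataset = load_heroes_dataset(
#       subjects=[0, 1],
#       subjects_parent_directory='/path/to/heroes/raw',
#       paths_patterns={'anat': 't1mri/acquisition1/anat*.nii'})
#   dataset['anat']  # -> list of two absolute paths, one per requested subject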
def fetch_kirby(subjects=range(2), sessions=[1], data_dir=None, url=None,
resume=True, verbose=1):
"""Download and load the KIRBY multi-modal dataset.
Parameters
----------
subjects : sequence of int or None, optional
ids of subjects to load, default to loading 2 subjects.
sessions: iterable of int, optional
The sessions to load. Load only the first session by default.
data_dir: string, optional
Path of the data directory. Used to force data storage in a specified
location. Default: None
url: string, optional
Override download URL. Used for test only (or if you setup a mirror of
the data). Default: None
Returns
-------
data: sklearn.datasets.base.Bunch
Dictionary-like object, the interest attributes are :
- 'anat': Paths to structural MPRAGE images
- 'asl': Paths to ASL images
- 'm0': Paths to ASL M0 images
Notes
------
This dataset is composed of 2 sessions of 21 participants (11 males) at 3T.
Imaging modalities include MPRAGE, FLAIR,
DTI, resting state fMRI, B0 and B1 field maps, ASL, VASO, quantitative T1
mapping, quantitative T2 mapping, and magnetization transfer imaging.
For each session, we only download MPRAGE and ASL data.
More details about this dataset can be found here :
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3020263
http://mri.kennedykrieger.org/databases.html
Paper to cite
-------------
`Multi-Parametric Neuroimaging Reproducibility: A 3T Resource Study
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3020263>`_
Bennett. A. Landman, Alan J. Huang, Aliya Gifford,Deepti S. Vikram,
Issel Anne L. Lim, Jonathan A.D. Farrell, John A. Bogovic, Jun Hua,
Min Chen,
Samson Jarso, Seth A. Smith, Suresh Joel, Susumu Mori, James J. Pekar,
Peter B. Barker, Jerry L. Prince, and Peter C.M. van Zijl.
NeuroImage. (2010)
NIHMS/PMC:252138 doi:10.1016/j.neuroimage.2010.11.047
Licence
-------
`BIRN Data License
<http://www.nbirn.net/bdr/Data_Use_Agreement_09_19_07-1.pdf>`_
"""
if url is None:
url = 'https://www.nitrc.org/frs/downloadlink.php/'
# Preliminary checks and declarations
dataset_name = 'kirby'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
subject_ids = np.array([
'849', '934', '679', '906', '913', '142', '127', '742', '422', '815',
'906', '239', '916', '959', '814', '505', '959', '492', '239', '142',
'815', '679', '800', '916', '849', '814', '800', '656', '742', '113',
'913', '502', '113', '127', '505', '502', '934', '492', '346', '656',
'346', '422'])
nitrc_ids = np.arange(2201, 2243)
ids = np.arange(1, 43)
# Group indices by session
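    # np.unique returns the position of the first occurrence of each subject id,
    # so indices1 selects the session-1 scans; indices2 then picks the remaining
    # occurrence of each subject id, i.e. the session-2 scans.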
_, indices1 = np.unique(subject_ids, return_index=True)
subject_ids1 = subject_ids[sorted(indices1)]
nitrc_ids1 = nitrc_ids[sorted(indices1)]
ids1 = ids[sorted(indices1)]
tuple_indices = [np.where(subject_ids == s)[0] for s in subject_ids1]
indices2 = [idx1 if idx1 not in indices1 else idx2
for (idx1, idx2) in tuple_indices]
subject_ids2 = subject_ids[indices2]
nitrc_ids2 = nitrc_ids[indices2]
ids2 = ids[indices2]
# Check arguments
max_subjects = len(subject_ids)
if max(subjects) > max_subjects:
warnings.warn('Warning: there are only {0} subjects'.format(
max_subjects))
subjects = range(max_subjects)
unique_subjects, indices = np.unique(subjects, return_index=True)
if len(unique_subjects) < len(subjects):
warnings.warn('Warning: Duplicate subjects, removing them.')
subjects = unique_subjects[np.argsort(indices)]
n_subjects = len(subjects)
archives = [
[url + '{0}/KKI2009-{1:02}.tar.bz2'.format(nitrc_id, id) for
(nitrc_id, id) in zip(nitrc_ids1, ids1)],
[url + '{0}/KKI2009-{1:02}.tar.bz2'.format(nitrc_id, id) for
(nitrc_id, id) in zip(nitrc_ids2, ids2)]
]
anat1 = [os.path.join('session1', subject,
'KKI2009-{0:02}-MPRAGE.nii'.format(i))
for subject, i in zip(subject_ids1, ids1)]
anat2 = [os.path.join('session2', subject,
'KKI2009-{0:02}-MPRAGE.nii'.format(i))
for subject, i in zip(subject_ids2, ids2)]
asl1 = [os.path.join('session1', subject,
'KKI2009-{0:02}-ASL.nii'.format(i))
for subject, i in zip(subject_ids1, ids1)]
asl2 = [os.path.join('session2', subject,
'KKI2009-{0:02}-ASL.nii'.format(i))
for subject, i in zip(subject_ids2, ids2)]
m01 = [os.path.join('session1', subject,
'KKI2009-{0:02}-ASLM0.nii'.format(i))
for subject, i in zip(subject_ids1, ids1)]
m02 = [os.path.join('session2', subject,
'KKI2009-{0:02}-ASLM0.nii'.format(i))
for subject, i in zip(subject_ids2, ids2)]
target = [
[os.path.join('session1', subject, 'KKI2009-{0:02}.tar.bz2'.format(id))
for (subject, id) in zip(subject_ids1, ids1)],
[os.path.join('session2', subject, 'KKI2009-{0:02}.tar.bz2'.format(id))
for (subject, id) in zip(subject_ids2, ids2)]
]
anat = [anat1, anat2]
asl = [asl1, asl2]
m0 = [m01, m02]
source_anat = []
source_asl = []
source_m0 = []
source_archives = []
session = []
target_archives = []
for i in sessions:
if not (i in [1, 2]):
raise ValueError('KIRBY dataset session id must be in [1, 2]')
source_anat += [anat[i - 1][subject] for subject in subjects]
source_asl += [asl[i - 1][subject] for subject in subjects]
source_m0 += [m0[i - 1][subject] for subject in subjects]
source_archives += [archives[i - 1][subject] for subject in subjects]
target_archives += [target[i - 1][subject] for subject in subjects]
session += [i] * n_subjects
# Dataset description
fdescr = _get_dataset_descr(dataset_name)
# Call fetch_files once per subject.
asl = []
m0 = []
anat = []
for anat_u, asl_u, m0_u, archive, target in zip(source_anat, source_asl,
source_m0, source_archives,
target_archives):
n, a, m = _fetch_files(
data_dir,
[(anat_u, archive, {'uncompress': True, 'move': target}),
(asl_u, archive, {'uncompress': True, 'move': target}),
(m0_u, archive, {'uncompress': True, 'move': target})],
verbose=verbose)
anat.append(n)
asl.append(a)
m0.append(m)
return Bunch(anat=anat, asl=asl, m0=m0, session=session,
description=fdescr)
| bsd-3-clause |
Purg/SMQTK | python/smqtk/bin/classifier_kfold_validation.py | 1 | 11461 | """
Helper utility for cross validating a supervised classifier configuration.
The classifier used should NOT be configured to save its model since this
process requires us to train the classifier multiple times.
Configuration
-------------
- plugins
- supervised_classifier
Supervised Classifier implementation configuration to use. This
should not be set to use a persistent model if able.
- descriptor_index
Index to draw descriptors to classify from.
- cross_validation
- truth_labels
            Path to a CSV file containing descriptor UUID to truth label
associations. This defines what descriptors are used from the given
index. We error if any descriptor UUIDs listed here are not
available in the given descriptor index. This file should be in
[uuid, label] column format.
- num_folds
Number of folds to make for cross validation.
- random_seed
            Optional fixed seed for the random number generator used to build
            the cross-validation folds.
- classification_use_multiprocessing
If we should use multiprocessing (vs threading) when classifying
elements.
- pr_curves
- enabled
If Precision/Recall plots should be generated.
- show
If we should attempt to show the graph after it has been generated
(matplotlib).
- output_directory
Directory to save generated plots to. If None, we will not save
plots. Otherwise we will create the directory (and required parent
directories) if it does not exist.
- file_prefix
String prefix to prepend to standard plot file names.
- roc_curves
- enabled
If ROC curves should be generated
- show
If we should attempt to show the plot after it has been generated
(matplotlib).
- output_directory
Directory to save generated plots to. If None, we will not save
plots. Otherwise we will create the directory (and required parent
directories) if it does not exist.
- file_prefix
String prefix to prepend to standard plot file names.
"""
import csv
import logging
import os
import matplotlib.pyplot as plt
import numpy
import sklearn.cross_validation
import sklearn.metrics
from smqtk.algorithms import get_classifier_impls
from smqtk.algorithms.classifier import SupervisedClassifier
from smqtk.representation import (
ClassificationElementFactory,
get_descriptor_index_impls,
)
from smqtk.representation.classification_element.memory import \
MemoryClassificationElement
from smqtk.utils import (
bin_utils,
file_utils,
plugin,
)
__author__ = "[email protected]"
def get_supervised_classifier_impls():
return get_classifier_impls(sub_interface=SupervisedClassifier)
def default_config():
return {
"plugins": {
"supervised_classifier":
plugin.make_config(get_supervised_classifier_impls()),
"descriptor_index":
plugin.make_config(get_descriptor_index_impls()),
},
"cross_validation": {
"truth_labels": None,
"num_folds": 6,
"random_seed": None,
"classification_use_multiprocessing": True,
},
"pr_curves": {
"enabled": True,
"show": False,
"output_directory": None,
"file_prefix": None,
},
"roc_curves": {
"enabled": True,
"show": False,
"output_directory": None,
"file_prefix": None,
},
}
def cli_parser():
return bin_utils.basic_cli_parser(__doc__)
def classifier_kfold_validation():
args = cli_parser().parse_args()
config = bin_utils.utility_main_helper(default_config, args)
log = logging.getLogger(__name__)
#
# Load configurations / Setup data
#
use_mp = config['cross_validation']['classification_use_multiprocessing']
pr_enabled = config['pr_curves']['enabled']
pr_output_dir = config['pr_curves']['output_directory']
pr_file_prefix = config['pr_curves']['file_prefix'] or ''
pr_show = config['pr_curves']['show']
roc_enabled = config['roc_curves']['enabled']
roc_output_dir = config['roc_curves']['output_directory']
roc_file_prefix = config['roc_curves']['file_prefix'] or ''
roc_show = config['roc_curves']['show']
log.info("Initializing DescriptorIndex (%s)",
config['plugins']['descriptor_index']['type'])
#: :type: smqtk.representation.DescriptorIndex
descriptor_index = plugin.from_plugin_config(
config['plugins']['descriptor_index'],
get_descriptor_index_impls()
)
log.info("Loading classifier configuration")
#: :type: dict
classifier_config = config['plugins']['supervised_classifier']
# Always use in-memory ClassificationElement since we are retraining the
# classifier and don't want possible element caching
#: :type: ClassificationElementFactory
classification_factory = ClassificationElementFactory(
MemoryClassificationElement, {}
)
log.info("Loading truth data")
#: :type: list[str]
uuids = []
#: :type: list[str]
truth_labels = []
with open(config['cross_validation']['truth_labels']) as f:
f_csv = csv.reader(f)
for row in f_csv:
uuids.append(row[0])
truth_labels.append(row[1])
#: :type: numpy.ndarray[str]
uuids = numpy.array(uuids)
#: :type: numpy.ndarray[str]
truth_labels = numpy.array(truth_labels)
#
# Cross validation
#
kfolds = sklearn.cross_validation.StratifiedKFold(
truth_labels, config['cross_validation']['num_folds'],
random_state=config['cross_validation']['random_seed']
)
"""
Truth and classification probability results for test data per fold.
Format:
{
0: {
'<label>': {
"truth": [...], # Parallel truth and classification
"proba": [...], # probability values
},
...
},
...
}
"""
fold_data = {}
i = 0
for train, test in kfolds:
log.info("Fold %d", i)
log.info("-- %d training examples", len(train))
log.info("-- %d test examples", len(test))
fold_data[i] = {}
log.info("-- creating classifier")
#: :type: SupervisedClassifier
classifier = plugin.from_plugin_config(
classifier_config,
get_supervised_classifier_impls()
)
log.info("-- gathering descriptors")
#: :type: dict[str, list[smqtk.representation.DescriptorElement]]
pos_map = {}
for idx in train:
if truth_labels[idx] not in pos_map:
pos_map[truth_labels[idx]] = []
pos_map[truth_labels[idx]].append(
descriptor_index.get_descriptor(uuids[idx])
)
log.info("-- Training classifier")
classifier.train(pos_map)
log.info("-- Classifying test set")
m = classifier.classify_async(
(descriptor_index.get_descriptor(uuids[idx]) for idx in test),
classification_factory,
use_multiprocessing=use_mp, ri=1.0
)
uuid2c = dict((d.uuid(), c.get_classification())
for d, c in m.iteritems())
log.info("-- Pairing truth and computed probabilities")
# Only considering positive labels
for t_label in pos_map:
fold_data[i][t_label] = {
"truth": [l == t_label for l in truth_labels[test]],
"proba": [uuid2c[uuid][t_label] for uuid in uuids[test]]
}
i += 1
#
# Curve generation
#
if pr_enabled:
make_pr_curves(fold_data, pr_output_dir, pr_file_prefix, pr_show)
if roc_enabled:
make_roc_curves(fold_data, roc_output_dir, roc_file_prefix, roc_show)
def format_plt(title, x_label, y_label):
plt.xlim([0., 1.])
plt.ylim([0., 1.05])
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.legend(loc='best')
def save_plt(output_dir, file_name, show):
file_utils.safe_create_dir(output_dir)
save_path = os.path.join(output_dir, file_name)
plt.savefig(save_path)
if show:
plt.show()
def make_curves(log, skl_curve_func, title_hook, x_label, y_label, fold_data,
output_dir, plot_prefix, show):
"""
Generic method for PR/ROC curve generation
:param skl_curve_func: scikit-learn curve generation function. This should
be wrapped to return (x, y) value arrays.
"""
file_utils.safe_create_dir(output_dir)
log.info("Generating %s curves for per-folds and overall", title_hook)
# in-order list of fold (x, y) value lists
fold_xy = []
fold_auc = []
# all truth and proba pairs
g_truth = []
g_proba = []
for i in fold_data:
log.info("-- Fold %i", i)
f_truth = []
f_proba = []
plt.clf()
for label in fold_data[i]:
log.info(" -- label '%s'", label)
l_truth = fold_data[i][label]['truth']
l_proba = fold_data[i][label]['proba']
x, y = skl_curve_func(l_truth, l_proba)
auc = sklearn.metrics.auc(x, y)
plt.plot(x, y, label="class '%s' (auc=%f)" % (label, auc))
f_truth.extend(l_truth)
f_proba.extend(l_proba)
# Plot for fold
x, y = skl_curve_func(f_truth, f_proba)
auc = sklearn.metrics.auc(x, y)
plt.plot(x, y, label="Fold (auc=%f)" % auc)
format_plt("Classifier %s - Fold %d" % (title_hook, i),
x_label, y_label)
filename = plot_prefix + 'fold_%d.png' % i
save_plt(output_dir, filename, show)
fold_xy.append([x, y])
fold_auc.append(auc)
g_truth.extend(f_truth)
g_proba.extend(f_proba)
# Plot global curve
log.info("-- All folds")
plt.clf()
for i in fold_data:
plt.plot(fold_xy[i][0], fold_xy[i][1],
label="Fold %d (auc=%f)" % (i, fold_auc[i]))
x, y = skl_curve_func(g_truth, g_proba)
auc = sklearn.metrics.auc(x, y)
plt.plot(x, y, label="All (auc=%f)" % auc)
format_plt("Classifier %s - Validation" % title_hook, x_label, y_label)
filename = plot_prefix + "validation.png"
save_plt(output_dir, filename, show)
def make_pr_curves(fold_data, output_dir, plot_prefix, show):
log = logging.getLogger(__name__)
def skl_pr_curve(truth, proba):
p, r, _ = sklearn.metrics.precision_recall_curve(truth, proba)
return r, p
make_curves(log, skl_pr_curve, "PR", "Recall", "Precision", fold_data,
output_dir, plot_prefix + 'pr.', show)
def make_roc_curves(fold_data, output_dir, plot_prefix, show):
log = logging.getLogger(__name__)
def skl_roc_curve(truth, proba):
fpr, tpr, _ = sklearn.metrics.roc_curve(truth, proba)
return fpr, tpr
make_curves(log, skl_roc_curve, "ROC", "False Positive Rate",
"True Positive Rate", fold_data, output_dir,
plot_prefix + 'roc.', show)
if __name__ == '__main__':
classifier_kfold_validation()
| bsd-3-clause |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/matplotlib/tests/test_agg.py | 5 | 9502 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import io
import os
from distutils.version import LooseVersion as V
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from matplotlib.image import imread
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.testing.decorators import (
cleanup, image_comparison, knownfailureif)
from matplotlib import pyplot as plt
from matplotlib import collections
from matplotlib import path
from matplotlib import transforms as mtransforms
@cleanup
def test_repeated_save_with_alpha():
# We want an image which has a background color of bluish green, with an
# alpha of 0.25.
fig = Figure([1, 0.4])
canvas = FigureCanvas(fig)
fig.set_facecolor((0, 1, 0.4))
fig.patch.set_alpha(0.25)
# The target color is fig.patch.get_facecolor()
buf = io.BytesIO()
fig.savefig(buf,
facecolor=fig.get_facecolor(),
edgecolor='none')
# Save the figure again to check that the
# colors don't bleed from the previous renderer.
buf.seek(0)
fig.savefig(buf,
facecolor=fig.get_facecolor(),
edgecolor='none')
# Check the first pixel has the desired color & alpha
# (approx: 0, 1.0, 0.4, 0.25)
buf.seek(0)
assert_array_almost_equal(tuple(imread(buf)[0, 0]),
(0.0, 1.0, 0.4, 0.250),
decimal=3)
@cleanup
def test_large_single_path_collection():
buff = io.BytesIO()
# Generates a too-large single path in a path collection that
# would cause a segfault if the draw_markers optimization is
# applied.
f, ax = plt.subplots()
collection = collections.PathCollection(
[path.Path([[-10, 5], [10, 5], [10, -5], [-10, -5], [-10, 5]])])
ax.add_artist(collection)
ax.set_xlim(10**-3, 1)
plt.savefig(buff)
def report_memory(i):
pid = os.getpid()
a2 = os.popen('ps -p %d -o rss,sz' % pid).readlines()
print(i, ' ', a2[1], end=' ')
return int(a2[1].split()[0])
# This test is disabled -- it uses old API. -ADS 2009-09-07
## def test_memleak():
## """Test agg backend for memory leaks."""
## from matplotlib.ft2font import FT2Font
## from numpy.random import rand
## from matplotlib.backend_bases import GraphicsContextBase
## from matplotlib.backends._backend_agg import RendererAgg
## fontname = '/usr/local/share/matplotlib/Vera.ttf'
## N = 200
## for i in range( N ):
## gc = GraphicsContextBase()
## gc.set_clip_rectangle( [20, 20, 20, 20] )
## o = RendererAgg( 400, 400, 72 )
## for j in range( 50 ):
## xs = [ 400*int(rand()) for k in range(8) ]
## ys = [ 400*int(rand()) for k in range(8) ]
## rgb = (1, 0, 0)
## pnts = zip( xs, ys )
## o.draw_polygon( gc, rgb, pnts )
## o.draw_polygon( gc, None, pnts )
## for j in range( 50 ):
## x = [ 400*int(rand()) for k in range(4) ]
## y = [ 400*int(rand()) for k in range(4) ]
## o.draw_lines( gc, x, y )
## for j in range( 50 ):
## args = [ 400*int(rand()) for k in range(4) ]
## rgb = (1, 0, 0)
## o.draw_rectangle( gc, rgb, *args )
## if 1: # add text
## font = FT2Font( fontname )
## font.clear()
## font.set_text( 'hi mom', 60 )
## font.set_size( 12, 72 )
## o.draw_text_image( font.get_image(), 30, 40, gc )
## fname = "agg_memleak_%05d.png"
## o.write_png( fname % i )
## val = report_memory( i )
## if i==1: start = val
## end = val
## avgMem = (end - start) / float(N)
## print 'Average memory consumed per loop: %1.4f\n' % (avgMem)
## #TODO: Verify the expected mem usage and approximate tolerance that
## # should be used
## #self.checkClose( 0.32, avgMem, absTol = 0.1 )
## # w/o text and w/o write_png: Average memory consumed per loop: 0.02
## # w/o text and w/ write_png : Average memory consumed per loop: 0.3400
## # w/ text and w/ write_png : Average memory consumed per loop: 0.32
@cleanup
def test_marker_with_nan():
# This creates a marker with nans in it, which was segfaulting the
# Agg backend (see #3722)
fig, ax = plt.subplots(1)
steps = 1000
data = np.arange(steps)
ax.semilogx(data)
ax.fill_between(data, data*0.8, data*1.2)
buf = io.BytesIO()
fig.savefig(buf, format='png')
@cleanup
def test_long_path():
buff = io.BytesIO()
fig, ax = plt.subplots()
np.random.seed(0)
points = np.random.rand(70000)
ax.plot(points)
fig.savefig(buff, format='png')
@image_comparison(baseline_images=['agg_filter'],
extensions=['png'], remove_text=True)
def test_agg_filter():
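    # The filter classes below exercise Artist.set_agg_filter: each filter object
    # is called with the rasterized RGBA buffer and the dpi and returns a
    # (filtered_image, dx, dy) tuple, which is how the drop shadow is produced.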
def smooth1d(x, window_len):
s = np.r_[2*x[0] - x[window_len:1:-1],
x,
2*x[-1] - x[-1:-window_len:-1]]
w = np.hanning(window_len)
y = np.convolve(w/w.sum(), s, mode='same')
return y[window_len-1:-window_len+1]
def smooth2d(A, sigma=3):
window_len = max(int(sigma), 3)*2 + 1
A1 = np.array([smooth1d(x, window_len) for x in np.asarray(A)])
A2 = np.transpose(A1)
A3 = np.array([smooth1d(x, window_len) for x in A2])
A4 = np.transpose(A3)
return A4
class BaseFilter(object):
def prepare_image(self, src_image, dpi, pad):
ny, nx, depth = src_image.shape
padded_src = np.zeros([pad*2 + ny, pad*2 + nx, depth], dtype="d")
padded_src[pad:-pad, pad:-pad, :] = src_image[:, :, :]
return padded_src # , tgt_image
def get_pad(self, dpi):
return 0
def __call__(self, im, dpi):
pad = self.get_pad(dpi)
padded_src = self.prepare_image(im, dpi, pad)
tgt_image = self.process_image(padded_src, dpi)
return tgt_image, -pad, -pad
class OffsetFilter(BaseFilter):
def __init__(self, offsets=None):
if offsets is None:
self.offsets = (0, 0)
else:
self.offsets = offsets
def get_pad(self, dpi):
return int(max(*self.offsets)/72.*dpi)
def process_image(self, padded_src, dpi):
ox, oy = self.offsets
a1 = np.roll(padded_src, int(ox/72.*dpi), axis=1)
a2 = np.roll(a1, -int(oy/72.*dpi), axis=0)
return a2
class GaussianFilter(BaseFilter):
"simple gauss filter"
def __init__(self, sigma, alpha=0.5, color=None):
self.sigma = sigma
self.alpha = alpha
if color is None:
self.color = (0, 0, 0)
else:
self.color = color
def get_pad(self, dpi):
return int(self.sigma*3/72.*dpi)
def process_image(self, padded_src, dpi):
tgt_image = np.zeros_like(padded_src)
aa = smooth2d(padded_src[:, :, -1]*self.alpha,
self.sigma/72.*dpi)
tgt_image[:, :, -1] = aa
tgt_image[:, :, :-1] = self.color
return tgt_image
class DropShadowFilter(BaseFilter):
def __init__(self, sigma, alpha=0.3, color=None, offsets=None):
self.gauss_filter = GaussianFilter(sigma, alpha, color)
self.offset_filter = OffsetFilter(offsets)
def get_pad(self, dpi):
return max(self.gauss_filter.get_pad(dpi),
self.offset_filter.get_pad(dpi))
def process_image(self, padded_src, dpi):
t1 = self.gauss_filter.process_image(padded_src, dpi)
t2 = self.offset_filter.process_image(t1, dpi)
return t2
if V(np.__version__) < V('1.7.0'):
return
fig = plt.figure()
ax = fig.add_subplot(111)
# draw lines
l1, = ax.plot([0.1, 0.5, 0.9], [0.1, 0.9, 0.5], "bo-",
mec="b", mfc="w", lw=5, mew=3, ms=10, label="Line 1")
l2, = ax.plot([0.1, 0.5, 0.9], [0.5, 0.2, 0.7], "ro-",
mec="r", mfc="w", lw=5, mew=3, ms=10, label="Line 1")
gauss = DropShadowFilter(4)
for l in [l1, l2]:
# draw shadows with same lines with slight offset.
xx = l.get_xdata()
yy = l.get_ydata()
shadow, = ax.plot(xx, yy)
shadow.update_from(l)
# offset transform
ot = mtransforms.offset_copy(l.get_transform(), ax.figure,
x=4.0, y=-6.0, units='points')
shadow.set_transform(ot)
# adjust zorder of the shadow lines so that it is drawn below the
# original lines
shadow.set_zorder(l.get_zorder() - 0.5)
shadow.set_agg_filter(gauss)
shadow.set_rasterized(True) # to support mixed-mode renderers
ax.set_xlim(0., 1.)
ax.set_ylim(0., 1.)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
@cleanup
def test_too_large_image():
fig = plt.figure(figsize=(300, 1000))
buff = io.BytesIO()
assert_raises(ValueError, fig.savefig, buff)
if __name__ == "__main__":
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| apache-2.0 |
rajat1994/scikit-learn | sklearn/grid_search.py | 32 | 36586 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
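                # Decode ``ind`` as a mixed-radix number: each divmod peels off
                # this parameter's offset, fastest-cycling parameter first.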
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
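def _demo_fit_grid_point():
    # Illustrative sketch (not part of the public API): evaluate a single
    # parameter setting on one train/test split with ``fit_grid_point``.
    # The dataset, estimator and parameter values are arbitrary examples.
    from sklearn import datasets, svm
    from sklearn.metrics import accuracy_score, make_scorer
    iris = datasets.load_iris()
    X, y = iris.data, iris.target
    # one random train/test split, expressed as index arrays
    rng = np.random.RandomState(0)
    indices = rng.permutation(len(y))
    train, test = indices[:100], indices[100:]
    scorer = make_scorer(accuracy_score)
    score, parameters, n_test = fit_grid_point(
        X, y, svm.SVC(), {'C': 1.0}, train, test, scorer, verbose=0)
    return score, parameters, n_test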
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.
    # By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance. We avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore we don't need any additional
    # slot in the subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of 4-tuples: score, n_test_samples, scoring time, parameters
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
    used to predict are optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, default=3
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if estimator is a classifier
and the target y is binary or multiclass, or the number
of folds in KFold otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
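def _demo_grid_search_scores():
    # Illustrative sketch: run a small grid search and inspect the
    # ``grid_scores_`` attribute. The estimator, parameter grid and dataset
    # are arbitrary examples.
    from sklearn import datasets, svm
    iris = datasets.load_iris()
    clf = GridSearchCV(svm.SVC(), {'kernel': ('linear', 'rbf'), 'C': [1, 10]})
    clf.fit(iris.data, iris.target)
    for score in clf.grid_scores_:
        print(score)          # formatted by _CVScoreTuple.__repr__ above
    return clf.best_params_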
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
    used to predict are optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if estimator is a classifier
and the target y is binary or multiclass, or the number
of folds in KFold otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
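def _demo_randomized_search():
    # Illustrative sketch: a small randomized search over SVC hyper-parameters.
    # The distributions, ``n_iter`` value and dataset are arbitrary examples.
    from scipy.stats import expon
    from sklearn import datasets, svm
    iris = datasets.load_iris()
    param_distributions = {'C': expon(scale=10), 'gamma': expon(scale=0.1)}
    search = RandomizedSearchCV(svm.SVC(), param_distributions,
                                n_iter=8, random_state=0)
    search.fit(iris.data, iris.target)
    return search.best_params_, search.best_score_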
| bsd-3-clause |
AlirezaShahabi/zipline | tests/modelling/test_factor.py | 9 | 2969 | """
Tests for Factor terms.
"""
from unittest import TestCase
from numpy import (
array,
)
from numpy.testing import assert_array_equal
from pandas import (
DataFrame,
date_range,
Int64Index,
)
from six import iteritems
from zipline.errors import UnknownRankMethod
from zipline.modelling.factor import TestingFactor
class F(TestingFactor):
inputs = ()
window_length = 0
class FactorTestCase(TestCase):
def setUp(self):
self.f = F()
self.dates = date_range('2014-01-01', periods=5, freq='D')
self.assets = Int64Index(range(5))
self.mask = DataFrame(True, index=self.dates, columns=self.assets)
def tearDown(self):
pass
def test_bad_input(self):
with self.assertRaises(UnknownRankMethod):
self.f.rank("not a real rank method")
def test_rank(self):
# Generated with:
# data = arange(25).reshape(5, 5).transpose() % 4
data = array([[0, 1, 2, 3, 0],
[1, 2, 3, 0, 1],
[2, 3, 0, 1, 2],
[3, 0, 1, 2, 3],
[0, 1, 2, 3, 0]])
expected_ranks = {
'ordinal': array([[1., 3., 4., 5., 2.],
[2., 4., 5., 1., 3.],
[3., 5., 1., 2., 4.],
[4., 1., 2., 3., 5.],
[1., 3., 4., 5., 2.]]),
'average': array([[1.5, 3., 4., 5., 1.5],
[2.5, 4., 5., 1., 2.5],
[3.5, 5., 1., 2., 3.5],
[4.5, 1., 2., 3., 4.5],
[1.5, 3., 4., 5., 1.5]]),
'min': array([[1., 3., 4., 5., 1.],
[2., 4., 5., 1., 2.],
[3., 5., 1., 2., 3.],
[4., 1., 2., 3., 4.],
[1., 3., 4., 5., 1.]]),
'max': array([[2., 3., 4., 5., 2.],
[3., 4., 5., 1., 3.],
[4., 5., 1., 2., 4.],
[5., 1., 2., 3., 5.],
[2., 3., 4., 5., 2.]]),
'dense': array([[1., 2., 3., 4., 1.],
[2., 3., 4., 1., 2.],
[3., 4., 1., 2., 3.],
[4., 1., 2., 3., 4.],
[1., 2., 3., 4., 1.]]),
}
# Test with the default, which should be 'ordinal'.
default_result = self.f.rank().compute_from_arrays([data], self.mask)
assert_array_equal(default_result, expected_ranks['ordinal'])
# Test with each method passed explicitly.
for method, expected_result in iteritems(expected_ranks):
result = self.f.rank(method=method).compute_from_arrays(
[data],
self.mask,
)
            assert_array_equal(result, expected_result)
| apache-2.0 |
dpshelio/scikit-image | skimage/feature/util.py | 37 | 4729 | import numpy as np
from ..util import img_as_float
from .._shared.utils import assert_nD
class FeatureDetector(object):
def __init__(self):
self.keypoints_ = np.array([])
def detect(self, image):
"""Detect keypoints in image.
Parameters
----------
image : 2D array
Input image.
"""
raise NotImplementedError()
class DescriptorExtractor(object):
def __init__(self):
self.descriptors_ = np.array([])
def extract(self, image, keypoints):
"""Extract feature descriptors in image for given keypoints.
Parameters
----------
image : 2D array
Input image.
keypoints : (N, 2) array
Keypoint locations as ``(row, col)``.
"""
raise NotImplementedError()
def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches,
keypoints_color='k', matches_color=None, only_matches=False):
"""Plot matched features.
Parameters
----------
ax : matplotlib.axes.Axes
Matches and image are drawn in this ax.
image1 : (N, M [, 3]) array
First grayscale or color image.
image2 : (N, M [, 3]) array
Second grayscale or color image.
keypoints1 : (K1, 2) array
First keypoint coordinates as ``(row, col)``.
keypoints2 : (K2, 2) array
Second keypoint coordinates as ``(row, col)``.
matches : (Q, 2) array
Indices of corresponding matches in first and second set of
descriptors, where ``matches[:, 0]`` denote the indices in the first
and ``matches[:, 1]`` the indices in the second set of descriptors.
keypoints_color : matplotlib color, optional
Color for keypoint locations.
matches_color : matplotlib color, optional
Color for lines which connect keypoint matches. By default the
color is chosen randomly.
only_matches : bool, optional
Whether to only plot matches and not plot the keypoint locations.
"""
image1 = img_as_float(image1)
image2 = img_as_float(image2)
new_shape1 = list(image1.shape)
new_shape2 = list(image2.shape)
if image1.shape[0] < image2.shape[0]:
new_shape1[0] = image2.shape[0]
elif image1.shape[0] > image2.shape[0]:
new_shape2[0] = image1.shape[0]
if image1.shape[1] < image2.shape[1]:
new_shape1[1] = image2.shape[1]
elif image1.shape[1] > image2.shape[1]:
new_shape2[1] = image1.shape[1]
if new_shape1 != image1.shape:
new_image1 = np.zeros(new_shape1, dtype=image1.dtype)
new_image1[:image1.shape[0], :image1.shape[1]] = image1
image1 = new_image1
if new_shape2 != image2.shape:
new_image2 = np.zeros(new_shape2, dtype=image2.dtype)
new_image2[:image2.shape[0], :image2.shape[1]] = image2
image2 = new_image2
image = np.concatenate([image1, image2], axis=1)
offset = image1.shape
if not only_matches:
ax.scatter(keypoints1[:, 1], keypoints1[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0],
facecolors='none', edgecolors=keypoints_color)
ax.imshow(image, interpolation='nearest', cmap='gray')
ax.axis((0, 2 * offset[1], offset[0], 0))
for i in range(matches.shape[0]):
idx1 = matches[i, 0]
idx2 = matches[i, 1]
if matches_color is None:
color = np.random.rand(3, 1)
else:
color = matches_color
ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]),
(keypoints1[idx1, 0], keypoints2[idx2, 0]),
'-', color=color)
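def _demo_plot_matches():
    # Illustrative sketch: visualise ORB keypoint matches between an image and
    # a cropped copy of it. Assumes matplotlib and skimage.data are available;
    # the keypoint count and crop offset are arbitrary examples.
    import matplotlib.pyplot as plt
    from skimage import data
    from skimage.feature import ORB, match_descriptors
    image1 = data.camera()
    image2 = image1[30:, 30:]  # translated view of the same scene
    orb = ORB(n_keypoints=100)
    orb.detect_and_extract(image1)
    keypoints1, descriptors1 = orb.keypoints, orb.descriptors
    orb.detect_and_extract(image2)
    keypoints2, descriptors2 = orb.keypoints, orb.descriptors
    matches = match_descriptors(descriptors1, descriptors2, cross_check=True)
    fig, ax = plt.subplots()
    plot_matches(ax, image1, image2, keypoints1, keypoints2, matches)
    plt.show()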
def _prepare_grayscale_input_2D(image):
image = np.squeeze(image)
assert_nD(image, 2)
return img_as_float(image)
def _mask_border_keypoints(image_shape, keypoints, distance):
"""Mask coordinates that are within certain distance from the image border.
Parameters
----------
image_shape : (2, ) array_like
Shape of the image as ``(rows, cols)``.
keypoints : (N, 2) array
Keypoint coordinates as ``(rows, cols)``.
distance : int
Image border distance.
Returns
-------
mask : (N, ) bool array
Mask indicating if pixels are within the image (``True``) or in the
border region of the image (``False``).
"""
rows = image_shape[0]
cols = image_shape[1]
mask = (((distance - 1) < keypoints[:, 0])
& (keypoints[:, 0] < (rows - distance + 1))
& ((distance - 1) < keypoints[:, 1])
& (keypoints[:, 1] < (cols - distance + 1)))
return mask
| bsd-3-clause |
alfredfrancis/ai-chatbot-framework | app/nlu/classifiers/tf_intent_classifer.py | 1 | 5236 | import os
import time
import cloudpickle
import numpy as np
import spacy
import tensorflow as tf
from sklearn.preprocessing import LabelBinarizer
from tensorflow.python.keras import Sequential
from tensorflow.python.layers.core import Dense
from tensorflow.python.layers.core import Dropout
np.random.seed(1)
class TfIntentClassifier:
def __init__(self):
self.model = None
self.nlp = spacy.load('en')
self.label_encoder = LabelBinarizer()
self.graph = None
def train(self, X, y, models_dir=None, verbose=True):
"""
Train intent classifier for given training data
:param X:
:param y:
:param models_dir:
:param verbose:
:return:
"""
def create_model():
"""
Define and return tensorflow model.
"""
model = Sequential()
model.add(Dense(256, activation=tf.nn.relu,
input_shape=(vocab_size,)))
model.add(Dropout(0.2))
model.add(Dense(128, activation=tf.nn.relu))
model.add(Dropout(0.2))
model.add(Dense(num_labels, activation=tf.nn.softmax))
"""
tried:
loss functions => categorical_crossentropy, binary_crossentropy
optimizers => adam, rmsprop
"""
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.summary()
return model
# spacy context vector size
vocab_size = 384
# create spacy doc vector matrix
x_train = np.array([list(self.nlp(x).vector) for x in X])
num_labels = len(set(y))
self.label_encoder.fit(y)
y_train = self.label_encoder.transform(y)
del self.model
tf.keras.backend.clear_session()
time.sleep(3)
self.model = create_model()
# start training
self.model.fit(x_train, y_train, shuffle=True, epochs=300, verbose=1)
if models_dir:
tf.keras.models.save_model(
self.model,
os.path.join(models_dir, "tf_intent_model.hd5")
)
if verbose:
print("TF Model written out to {}"
.format(os.path.join(models_dir, "tf_intent_model.hd5")))
cloudpickle.dump(self.label_encoder, open(
os.path.join(models_dir, "labels.pkl"), 'wb'))
if verbose:
print("Labels written out to {}"
.format(os.path.join(models_dir, "labels.pkl")))
def load(self, models_dir):
try:
del self.model
tf.keras.backend.clear_session()
self.model = tf.keras.models.load_model(
os.path.join(models_dir, "tf_intent_model.hd5"), compile=True)
self.graph = tf.get_default_graph()
print("Tf model loaded")
with open(os.path.join(models_dir, "labels.pkl"), 'rb') as f:
self.label_encoder = cloudpickle.load(f)
print("Labels model loaded")
except IOError:
return False
def predict(self, text):
"""
Predict class label for given model
:param text:
:return:
"""
return self.process(text)
def predict_proba(self, x):
"""Given a bow vector of an input text, predict most probable label.
Returns only the most likely label.
:param x: raw input text
:return: tuple of first, the most probable label and second,
its probability"""
x_predict = [self.nlp(x).vector]
with self.graph.as_default():
pred_result = self.model.predict(np.array([x_predict[0]]))
sorted_indices = np.fliplr(np.argsort(pred_result, axis=1))
return sorted_indices, pred_result[:, sorted_indices]
def process(self, x, return_type="intent", INTENT_RANKING_LENGTH=5):
"""Returns the most likely intent and
its probability for the input text."""
if not self.model:
print("no class")
intent = None
intent_ranking = []
else:
intents, probabilities = self.predict_proba(x)
intents = [self.label_encoder.classes_[intent]
for intent in intents.flatten()]
probabilities = probabilities.flatten()
if len(intents) > 0 and len(probabilities) > 0:
ranking = list(zip(list(intents), list(probabilities)))
ranking = ranking[:INTENT_RANKING_LENGTH]
intent = {"intent": intents[0],
"confidence": float("%.2f" % probabilities[0])}
intent_ranking = [{"intent": intent_name,
"confidence": float("%.2f" % score)}
for intent_name, score in ranking]
else:
intent = {"name": None, "confidence": 0.0}
intent_ranking = []
if return_type == "intent":
return intent
else:
return intent_ranking
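def _demo_intent_classifier():
    # Illustrative sketch: train on a tiny, made-up corpus and classify one
    # sentence. Assumes the spaCy 'en' model and TensorFlow are installed;
    # the sentences, labels and model directory are illustrative only.
    training_sentences = ["hi there", "hello", "bye for now",
                          "thanks a lot", "thank you so much"]
    training_labels = ["greet", "greet", "goodbye", "thanks", "thanks"]
    models_dir = "model_files"
    os.makedirs(models_dir, exist_ok=True)
    classifier = TfIntentClassifier()
    classifier.train(training_sentences, training_labels,
                     models_dir=models_dir, verbose=True)
    classifier.load(models_dir)
    print(classifier.predict("hello bot"))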
| mit |
anders-dc/ns2dfd | ns2dfd.py | 1 | 14344 | #!/usr/bin/env python
import numpy
import subprocess
import vtk
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class fluid:
def __init__(self, name = 'unnamed'):
'''
A Navier-Stokes two-dimensional fluid flow simulation object. Most
simulation values are assigned default values upon initialization.
:param name: Simulation identifier
:type name: str
'''
self.sim_id = name
self.init_grid()
self.current_time()
self.end_time()
self.file_time()
self.safety_factor()
self.max_iterations()
self.tolerance_criteria()
self.relaxation_parameter()
self.upwind_differencing_factor()
self.boundary_conditions()
self.reynolds_number()
self.gravity()
def init_grid(self, nx = 10, ny = 10, dx = 0.1, dy = 0.1):
'''
Initializes the numerical grid.
:param nx: Fluid grid width in number of cells
:type nx: int
:param ny: Fluid grid height in number of cells
:type ny: int
:param dx: Grid cell width (meters)
:type dx: float
:param dy: Grid cell height (meters)
:type dy: float
'''
self.nx = numpy.asarray(nx)
self.ny = numpy.asarray(ny)
self.dx = numpy.asarray(dx)
self.dy = numpy.asarray(dy)
self.P = numpy.zeros((nx+2, ny+2))
self.U = numpy.zeros((nx+2, ny+2))
self.V = numpy.zeros((nx+2, ny+2))
def current_time(self, t = 0.0):
'''
Set the current simulation time. Default value = 0.0.
:param t: The current time value.
:type t: float
'''
self.t = numpy.asarray(t)
def end_time(self, t_end = 1.0):
'''
Set the simulation end time.
:param t_end: The time when to stop the simulation.
:type t_end: float
'''
self.t_end = numpy.asarray(t_end)
def file_time(self, t_file = 0.1):
'''
Set the simulation output file interval.
:param t_file: The time when to stop the simulation.
:type t_file: float
'''
self.t_file = numpy.asarray(t_file)
def safety_factor(self, tau = 0.5):
'''
Define the safety factor for the time step size control. Default value =
0.5.
:param tau: Safety factor in ]0;1]
:type tau: float
'''
self.tau = numpy.asarray(tau)
def max_iterations(self, itermax = 5000):
'''
Set the maximal allowed iterations per time step. Default value = 5000.
:param itermax: Max. solution iterations in [1;inf[
:type itermax: int
'''
self.itermax = numpy.asarray(itermax)
def tolerance_criteria(self, epsilon = 1.0e-4):
'''
Set the tolerance criteria for the fluid solver. Default value = 1.0e-4.
:param epsilon: Criteria value
:type epsilon: float
'''
self.epsilon = numpy.asarray(epsilon)
def relaxation_parameter(self, omega = 1.7):
'''
Set the relaxation parameter for the successive overrelaxation (SOR)
solver. The solver is identical to the Gauss-Seidel method when omega =
1. Default value = 1.7.
:param omega: Relaxation parameter value, in ]0;2[
:type omega: float
'''
self.omega = numpy.asarray(omega)
def upwind_differencing_factor(self, gamma = 0.9):
'''
        Set the upwind differencing factor used in the finite difference
        approximations. Default value = 0.9.
        :param gamma: Upwind differencing factor value, in ]0;1[
:type gamma: float
'''
self.gamma = numpy.asarray(gamma)
def boundary_conditions(self, left = 1, right = 1, top = 1, bottom = 1):
'''
Set the wall boundary conditions. The values correspond to the following
conditions: 1) free-slip, 2) no-slip, 3) outflow, 4) periodic
:param left, right, top, bottom: The wall to specify the BC for
:type left, right, top, bottom: int
'''
self.w_left = numpy.asarray(left)
self.w_right = numpy.asarray(right)
self.w_top = numpy.asarray(top)
self.w_bottom = numpy.asarray(bottom)
def reynolds_number(self, re = 100):
'''
Define the simulation Reynolds number.
:param re: Reynolds number in ]0;infty[
:type re: float
'''
self.re = numpy.asarray(re)
def gravity(self, gx = 0.0, gy = 0.0):
'''
Set the gravitational acceleration on the fluid.
:param gx: Horizontal gravitational acceleration.
:type gx: float
:param gy: Vertical gravitational acceleration. Negative values are
downward.
:type gy: float
'''
self.gx = numpy.asarray(gx)
self.gy = numpy.asarray(gy)
def read(self, path, verbose = True):
'''
Read data file from disk.
:param path: Path to data file
:type path: str
'''
fh = None
try:
targetfile = path
if verbose == True:
print('Input file: ' + targetfile)
fh = open(targetfile, 'rb')
self.t = numpy.fromfile(fh, dtype=numpy.float64, count=1)
self.t_end = numpy.fromfile(fh, dtype=numpy.float64, count=1)
self.t_file = numpy.fromfile(fh, dtype=numpy.float64, count=1)
self.tau = numpy.fromfile(fh, dtype=numpy.float64, count=1)
self.itermax = numpy.fromfile(fh, dtype=numpy.int32, count=1)
self.epsilon = numpy.fromfile(fh, dtype=numpy.float64, count=1)
self.omega = numpy.fromfile(fh, dtype=numpy.float64, count=1)
self.gamma = numpy.fromfile(fh, dtype=numpy.float64, count=1)
self.gx = numpy.fromfile(fh, dtype=numpy.float64, count=1)
self.gy = numpy.fromfile(fh, dtype=numpy.float64, count=1)
self.re = numpy.fromfile(fh, dtype=numpy.float64, count=1)
self.w_left = numpy.fromfile(fh, dtype=numpy.int32, count=1)
self.w_right = numpy.fromfile(fh, dtype=numpy.int32, count=1)
self.w_top = numpy.fromfile(fh, dtype=numpy.int32, count=1)
self.w_bottom = numpy.fromfile(fh, dtype=numpy.int32, count=1)
self.dx = numpy.fromfile(fh, dtype=numpy.float64, count=1)
self.dy = numpy.fromfile(fh, dtype=numpy.float64, count=1)
self.nx = numpy.fromfile(fh, dtype=numpy.int32, count=1)
self.ny = numpy.fromfile(fh, dtype=numpy.int32, count=1)
self.init_grid(dx = self.dx, dy = self.dy,\
nx = self.nx, ny = self.ny)
for i in range(self.nx+2):
for j in range(self.ny+2):
self.P[i,j] = \
numpy.fromfile(fh, dtype=numpy.float64, count=1)
for i in range(self.nx+2):
for j in range(self.ny+2):
self.U[i,j] = \
numpy.fromfile(fh, dtype=numpy.float64, count=1)
for i in range(self.nx+2):
for j in range(self.ny+2):
self.V[i,j] = \
numpy.fromfile(fh, dtype=numpy.float64, count=1)
finally:
if fh is not None:
fh.close()
def write(self, verbose = True, folder = './'):
'''
Write the simulation parameters to disk so that the fluid flow solver
can read it.
'''
fh = None
try:
targetfile = folder + '/' + self.sim_id + '.dat'
if verbose == True:
print('Output file: ' + targetfile)
fh = open(targetfile, 'wb')
fh.write(self.t.astype(numpy.float64))
fh.write(self.t_end.astype(numpy.float64))
fh.write(self.t_file.astype(numpy.float64))
fh.write(self.tau.astype(numpy.float64))
fh.write(self.itermax.astype(numpy.int32))
fh.write(self.epsilon.astype(numpy.float64))
fh.write(self.omega.astype(numpy.float64))
fh.write(self.gamma.astype(numpy.float64))
fh.write(self.gx.astype(numpy.float64))
fh.write(self.gy.astype(numpy.float64))
fh.write(self.re.astype(numpy.float64))
fh.write(self.w_left.astype(numpy.int32))
fh.write(self.w_right.astype(numpy.int32))
fh.write(self.w_top.astype(numpy.int32))
fh.write(self.w_bottom.astype(numpy.int32))
fh.write(self.dx.astype(numpy.float64))
fh.write(self.dy.astype(numpy.float64))
fh.write(self.nx.astype(numpy.int32))
fh.write(self.ny.astype(numpy.int32))
for i in range(self.nx+2):
for j in range(self.ny+2):
fh.write(self.P[i,j].astype(numpy.float64))
for i in range(self.nx+2):
for j in range(self.ny+2):
fh.write(self.U[i,j].astype(numpy.float64))
for i in range(self.nx+2):
for j in range(self.ny+2):
fh.write(self.V[i,j].astype(numpy.float64))
finally:
if fh is not None:
fh.close()
def run(self):
'''
Run the simulation using the C program.
'''
self.write()
subprocess.call('./ns2dfd ' + self.sim_id + '.dat', shell=True)
def writeVTK(self, folder = './', verbose = True):
'''
Writes a VTK file for the fluid grid to the current folder by default.
        The file name will be in the format ``<self.sim_id>.vti``. The vti files
can be used for visualizing the fluid in ParaView.
The fluid grid is visualized by opening the vti files, and pressing
"Apply" to import all fluid field properties. To visualize the scalar
fields, such as the pressure, the porosity, the porosity change or the
velocity magnitude, choose "Surface" or "Surface With Edges" as the
"Representation". Choose the desired property as the "Coloring" field.
It may be desirable to show the color bar by pressing the "Show" button,
and "Rescale" to fit the color range limits to the current file. The
coordinate system can be displayed by checking the "Show Axis" field.
All adjustments by default require the "Apply" button to be pressed
before regenerating the view.
        The fluid vector fields (e.g. the fluid velocity) can be visualized by
        e.g. arrows. To do this, select the fluid data in the "Pipeline
        Browser". Press "Glyph" from the "Common" toolbar, or go to the
        "Filters" menu, and press "Glyph" from the "Common" list. Make sure
that "Arrow" is selected as the "Glyph type", and "Velocity" as the
"Vectors" value. Adjust the "Maximum Number of Points" to be at least as
big as the number of fluid cells in the grid. Press "Apply" to visualize
the arrows.
If several data files are generated for the same simulation (e.g. using
        the :func:`writeVTKall()` function), it is possible to step the
        visualization through time by using the ParaView controls.
        :param folder: The folder where to place the output file
            (default = './')
:type folder: str
:param verbose: Show diagnostic information (default = True)
:type verbose: bool
'''
filename = folder + '/' + self.sim_id + '.vti' # image grid
        # initialize VTK data structure
grid = vtk.vtkImageData()
grid.SetOrigin([0.0, 0.0, 0.0])
grid.SetSpacing([self.dx, self.dy, 1])
grid.SetDimensions([self.nx+2, self.ny+2, 1])
# array of scalars: hydraulic pressures
pres = vtk.vtkDoubleArray()
pres.SetName("Pressure")
pres.SetNumberOfComponents(1)
pres.SetNumberOfTuples(grid.GetNumberOfPoints())
# array of vectors: hydraulic velocities
vel = vtk.vtkDoubleArray()
vel.SetName("Velocity")
vel.SetNumberOfComponents(2)
vel.SetNumberOfTuples(grid.GetNumberOfPoints())
# insert values
for y in range(self.ny+2):
for x in range(self.nx+2):
idx = x + (self.nx+2)*y
pres.SetValue(idx, self.P[x,y])
vel.SetTuple(idx, [self.U[x,y], self.V[x,y]])
        # add pressure and velocity arrays to the grid
grid.GetPointData().AddArray(pres)
grid.GetPointData().AddArray(vel)
# write VTK XML image data file
writer = vtk.vtkXMLImageDataWriter()
writer.SetFileName(filename)
writer.SetInput(grid)
writer.Update()
if (verbose == True):
print('Output file: {0}'.format(filename))
def plot_PUV(self, format = 'png'):
plt.figure(figsize=[8,8])
#ax = plt.subplot(1, 3, 1)
plt.title("Pressure")
imgplt = plt.imshow(self.P.T, origin='lower')
imgplt.set_interpolation('nearest')
#imgplt.set_interpolation('bicubic')
#imgplt.set_cmap('hot')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.colorbar()
# show velocities as arrows
Q = plt.quiver(self.U, self.V)
# show velocities as stream lines
#plt.streamplot(numpy.arange(self.nx+2),numpy.arange(self.ny+2),\
#self.U, self.V)
'''
# show velocities as heat maps
ax = plt.subplot(1, 3, 2)
plt.title("U")
imgplt = plt.imshow(self.U.T, origin='lower')
imgplt.set_interpolation('nearest')
#imgplt.set_interpolation('bicubic')
#imgplt.set_cmap('hot')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.colorbar()
ax = plt.subplot(1, 3, 3)
plt.title("V")
imgplt = plt.imshow(self.V.T, origin='lower')
imgplt.set_interpolation('nearest')
#imgplt.set_interpolation('bicubic')
#imgplt.set_cmap('hot')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.colorbar()
'''
plt.savefig(self.sim_id + '-PUV.' + format, transparent=False)
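def _demo_setup():
    # Illustrative sketch: configure a small simulation and write its input
    # file for the C solver. All parameter values below are arbitrary
    # examples, not validated settings.
    sim = fluid(name='demo')
    sim.init_grid(nx=32, ny=32, dx=1.0/32, dy=1.0/32)
    sim.end_time(5.0)
    sim.file_time(0.5)
    sim.reynolds_number(100.0)
    sim.boundary_conditions(left=2, right=2, top=2, bottom=2)  # no-slip walls
    sim.write()      # writes ./demo.dat, ready for `./ns2dfd demo.dat`
    # sim.run()      # would write the file and invoke the external solver
    return sim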
| gpl-3.0 |
gotomypc/scikit-learn | sklearn/linear_model/omp.py | 127 | 30417 | """Orthogonal matching pursuit algorithms
"""
# Author: Vlad Niculae
#
# License: BSD 3 clause
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from ..utils import as_float_array, check_array, check_X_y
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
# check_finite=False is an optimization available only in scipy >=0.12
solve_triangular_args = {'check_finite': False}
premature = """ Orthogonal matching pursuit ended prematurely due to linear
dependence in the dictionary. The requested precision might not have been met.
"""
def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True,
return_path=False):
"""Orthogonal Matching Pursuit step using the Cholesky decomposition.
Parameters
----------
X : array, shape (n_samples, n_features)
Input dictionary. Columns are assumed to have unit norm.
y : array, shape (n_samples,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coef : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
if copy_X:
X = X.copy('F')
else: # even if we are allowed to overwrite, still copy it if bad order
X = np.asfortranarray(X)
min_float = np.finfo(X.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,))
potrs, = get_lapack_funcs(('potrs',), (X,))
alpha = np.dot(X.T, y)
residual = y
gamma = np.empty(0)
n_active = 0
indices = np.arange(X.shape[1]) # keeping track of swapping
max_features = X.shape[1] if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=X.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=X.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(np.dot(X.T, residual)))
if lam < n_active or alpha[lam] ** 2 < min_float:
# atom already selected or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
if n_active > 0:
# Updates the Cholesky decomposition of X' X
L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam])
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=2)
break
L[n_active, n_active] = np.sqrt(1 - v)
X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam])
alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active]
indices[n_active], indices[lam] = indices[lam], indices[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
residual = y - np.dot(X[:, :n_active], gamma)
if tol is not None and nrm2(residual) ** 2 <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None,
copy_Gram=True, copy_Xy=True, return_path=False):
"""Orthogonal Matching Pursuit step on a precomputed Gram matrix.
    This function uses the Cholesky decomposition method.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data matrix
Xy : array, shape (n_features,)
Input targets
n_nonzero_coefs : int
Targeted number of non-zero elements
tol_0 : float
Squared norm of y, required if tol is not None.
tol : float
Targeted squared error, if not None overrides n_nonzero_coefs.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
Returns
-------
gamma : array, shape (n_nonzero_coefs,)
Non-zero elements of the solution
idx : array, shape (n_nonzero_coefs,)
Indices of the positions of the elements in gamma within the solution
vector
coefs : array, shape (n_features, n_nonzero_coefs)
The first k values of column k correspond to the coefficient value
for the active features at that step. The lower left triangle contains
garbage. Only returned if ``return_path=True``.
n_active : int
Number of active features at convergence.
"""
Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram)
if copy_Xy:
Xy = Xy.copy()
min_float = np.finfo(Gram.dtype).eps
nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,))
potrs, = get_lapack_funcs(('potrs',), (Gram,))
indices = np.arange(len(Gram)) # keeping track of swapping
alpha = Xy
tol_curr = tol_0
delta = 0
gamma = np.empty(0)
n_active = 0
max_features = len(Gram) if tol is not None else n_nonzero_coefs
if solve_triangular_args:
# new scipy, don't need to initialize because check_finite=False
L = np.empty((max_features, max_features), dtype=Gram.dtype)
else:
# old scipy, we need the garbage upper triangle to be non-Inf
L = np.zeros((max_features, max_features), dtype=Gram.dtype)
L[0, 0] = 1.
if return_path:
coefs = np.empty_like(L)
while True:
lam = np.argmax(np.abs(alpha))
if lam < n_active or alpha[lam] ** 2 < min_float:
# selected same atom twice, or inner product too small
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
if n_active > 0:
L[n_active, :n_active] = Gram[lam, :n_active]
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = nrm2(L[n_active, :n_active]) ** 2
if 1 - v <= min_float: # selected atoms are dependent
warnings.warn(premature, RuntimeWarning, stacklevel=3)
break
L[n_active, n_active] = np.sqrt(1 - v)
Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam])
Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam])
indices[n_active], indices[lam] = indices[lam], indices[n_active]
Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active]
n_active += 1
# solves LL'x = y as a composition of two triangular systems
gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True,
overwrite_b=False)
if return_path:
coefs[:n_active, n_active - 1] = gamma
beta = np.dot(Gram[:, :n_active], gamma)
alpha = Xy - beta
if tol is not None:
tol_curr += delta
delta = np.inner(gamma, beta[:n_active])
tol_curr -= delta
if abs(tol_curr) <= tol:
break
elif n_active == max_features:
break
if return_path:
return gamma, indices[:n_active], coefs[:, :n_active], n_active
else:
return gamma, indices[:n_active], n_active
def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False,
copy_X=True, return_path=False,
return_n_iter=False):
"""Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems.
An instance of the problem has the form:
When parametrized by the number of non-zero coefficients using
`n_nonzero_coefs`:
argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs}
When parametrized by error using the parameter `tol`:
argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
X : array, shape (n_samples, n_features)
Input data. Columns are assumed to have unit norm.
y : array, shape (n_samples,) or (n_samples, n_targets)
Input targets
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
precompute : {True, False, 'auto'},
Whether to perform precomputations. Improves performance when n_targets
or n_samples is very large.
copy_X : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp_gram
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
X = check_array(X, order='F', copy=copy_X)
copy_X = False
if y.ndim == 1:
y = y.reshape(-1, 1)
y = check_array(y)
if y.shape[1] > 1: # subsequent targets will be affected
copy_X = True
if n_nonzero_coefs is None and tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1)
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > X.shape[1]:
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if precompute == 'auto':
precompute = X.shape[0] > X.shape[1]
if precompute:
G = np.dot(X.T, X)
G = np.asfortranarray(G)
Xy = np.dot(X.T, y)
if tol is not None:
norms_squared = np.sum((y ** 2), axis=0)
else:
norms_squared = None
return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared,
copy_Gram=copy_X, copy_Xy=False,
return_path=return_path)
if return_path:
coef = np.zeros((X.shape[1], y.shape[1], X.shape[1]))
else:
coef = np.zeros((X.shape[1], y.shape[1]))
n_iters = []
for k in range(y.shape[1]):
out = _cholesky_omp(
X, y[:, k], n_nonzero_coefs, tol,
copy_X=copy_X, return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if y.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
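def _demo_orthogonal_mp():
    # Illustrative sketch: recover a sparse coefficient vector from a random
    # unit-norm dictionary with ``orthogonal_mp``. Sizes, sparsity level and
    # the random seed are arbitrary examples.
    rng = np.random.RandomState(0)
    n_samples, n_features, n_nonzero = 50, 100, 5
    X = rng.randn(n_samples, n_features)
    X /= np.sqrt(np.sum(X ** 2, axis=0))     # normalize columns to unit norm
    true_coef = np.zeros(n_features)
    support = rng.choice(n_features, n_nonzero, replace=False)
    true_coef[support] = rng.randn(n_nonzero)
    y = np.dot(X, true_coef)
    coef = orthogonal_mp(X, y, n_nonzero_coefs=n_nonzero)
    return np.flatnonzero(coef), np.sort(support)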
def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None,
norms_squared=None, copy_Gram=True,
copy_Xy=True, return_path=False,
return_n_iter=False):
"""Gram Orthogonal Matching Pursuit (OMP)
Solves n_targets Orthogonal Matching Pursuit problems using only
the Gram matrix X.T * X and the product X.T * y.
Read more in the :ref:`User Guide <omp>`.
Parameters
----------
Gram : array, shape (n_features, n_features)
Gram matrix of the input data: X.T * X
Xy : array, shape (n_features,) or (n_features, n_targets)
Input targets multiplied by X: X.T * y
n_nonzero_coefs : int
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
norms_squared : array-like, shape (n_targets,)
Squared L2 norms of the lines of y. Required if tol is not None.
copy_Gram : bool, optional
Whether the gram matrix must be copied by the algorithm. A false
value is only helpful if it is already Fortran-ordered, otherwise a
copy is made anyway.
copy_Xy : bool, optional
Whether the covariance vector Xy must be copied by the algorithm.
If False, it may be overwritten.
return_path : bool, optional. Default: False
Whether to return every value of the nonzero coefficients along the
forward path. Useful for cross-validation.
return_n_iter : bool, optional default False
Whether or not to return the number of iterations.
Returns
-------
coef : array, shape (n_features,) or (n_features, n_targets)
Coefficients of the OMP solution. If `return_path=True`, this contains
the whole coefficient path. In this case its shape is
(n_features, n_features) or (n_features, n_targets, n_features) and
iterating over the last axis yields coefficients in increasing order
of active features.
n_iters : array-like or int
Number of active features across every target. Returned only if
`return_n_iter` is set to True.
See also
--------
OrthogonalMatchingPursuit
orthogonal_mp
lars_path
decomposition.sparse_encode
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
"""
Gram = check_array(Gram, order='F', copy=copy_Gram)
Xy = np.asarray(Xy)
if Xy.ndim > 1 and Xy.shape[1] > 1:
        # or subsequent targets will be affected
copy_Gram = True
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if tol is not None:
norms_squared = [norms_squared]
if n_nonzero_coefs is None and tol is None:
n_nonzero_coefs = int(0.1 * len(Gram))
if tol is not None and norms_squared is None:
raise ValueError('Gram OMP needs the precomputed norms in order '
'to evaluate the error sum of squares.')
if tol is not None and tol < 0:
raise ValueError("Epsilon cannot be negative")
if tol is None and n_nonzero_coefs <= 0:
raise ValueError("The number of atoms must be positive")
if tol is None and n_nonzero_coefs > len(Gram):
raise ValueError("The number of atoms cannot be more than the number "
"of features")
if return_path:
coef = np.zeros((len(Gram), Xy.shape[1], len(Gram)))
else:
coef = np.zeros((len(Gram), Xy.shape[1]))
n_iters = []
for k in range(Xy.shape[1]):
out = _gram_omp(
Gram, Xy[:, k], n_nonzero_coefs,
norms_squared[k] if tol is not None else None, tol,
copy_Gram=copy_Gram, copy_Xy=copy_Xy,
return_path=return_path)
if return_path:
_, idx, coefs, n_iter = out
coef = coef[:, :, :len(idx)]
for n_active, x in enumerate(coefs.T):
coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1]
else:
x, idx, n_iter = out
coef[idx, k] = x
n_iters.append(n_iter)
if Xy.shape[1] == 1:
n_iters = n_iters[0]
if return_n_iter:
return np.squeeze(coef), n_iters
else:
return np.squeeze(coef)
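# --- Editor's usage sketch (not part of the original scikit-learn module) --
# Hedged illustration of the Gram variant defined above: precompute
# G = X.T * X and Xy = X.T * y once and pass them in, which is what
# ``orthogonal_mp`` itself does when ``precompute`` is enabled.  The helper
# name ``_example_orthogonal_mp_gram`` is hypothetical, for illustration only.
def _example_orthogonal_mp_gram():
    import numpy as np
    rng = np.random.RandomState(1)
    X = rng.randn(80, 40)
    X /= np.sqrt(np.sum(X ** 2, axis=0))     # unit-norm atoms (columns)
    w = np.zeros(40)
    w[[2, 11, 29]] = [1.5, -1.0, 2.0]        # 3-sparse ground truth
    y = np.dot(X, w)
    G = np.dot(X.T, X)
    Xy = np.dot(X.T, y)
    return orthogonal_mp_gram(G, Xy, n_nonzero_coefs=3)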
class OrthogonalMatchingPursuit(LinearModel, RegressorMixin):
"""Orthogonal Matching Pursuit model (OMP)
Parameters
----------
n_nonzero_coefs : int, optional
Desired number of non-zero entries in the solution. If None (by
default) this value is set to 10% of n_features.
tol : float, optional
Maximum norm of the residual. If not None, overrides n_nonzero_coefs.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
precompute : {True, False, 'auto'}, default 'auto'
Whether to use a precomputed Gram and Xy matrix to speed up
calculations. Improves performance when `n_targets` or `n_samples` is
very large. Note that if you already have such matrices, you can pass
them directly to the fit method.
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
coef_ : array, shape (n_features,) or (n_features, n_targets)
parameter vector (w in the formula)
intercept_ : float or array, shape (n_targets,)
independent term in decision function.
n_iter_ : int or array-like
Number of active features across every target.
Notes
-----
Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang,
Matching pursuits with time-frequency dictionaries, IEEE Transactions on
Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415.
(http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf)
This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad,
M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal
Matching Pursuit Technical Report - CS Technion, April 2008.
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
decomposition.sparse_encode
"""
def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True,
normalize=True, precompute='auto'):
self.n_nonzero_coefs = n_nonzero_coefs
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std, Gram, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=True)
if y.ndim == 1:
y = y[:, np.newaxis]
if self.n_nonzero_coefs is None and self.tol is None:
# default for n_nonzero_coefs is 0.1 * n_features
# but at least one.
self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1)
else:
self.n_nonzero_coefs_ = self.n_nonzero_coefs
if Gram is False:
coef_, self.n_iter_ = orthogonal_mp(
X, y, self.n_nonzero_coefs_, self.tol,
precompute=False, copy_X=True,
return_n_iter=True)
else:
norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None
coef_, self.n_iter_ = orthogonal_mp_gram(
Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_,
tol=self.tol, norms_squared=norms_sq,
copy_Gram=True, copy_Xy=True,
return_n_iter=True)
self.coef_ = coef_.T
self._set_intercept(X_mean, y_mean, X_std)
return self
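# --- Editor's usage sketch (not part of the original scikit-learn module) --
# Hedged example of the estimator interface defined above: fit
# ``OrthogonalMatchingPursuit`` on synthetic data and predict.  The helper
# name ``_example_omp_estimator`` is hypothetical and only illustrates the
# fit/predict round trip.
def _example_omp_estimator():
    import numpy as np
    rng = np.random.RandomState(2)
    X = rng.randn(100, 30)
    coef = np.zeros(30)
    coef[[3, 7]] = [2.0, -1.5]               # 2-sparse ground truth
    y = np.dot(X, coef) + 0.01 * rng.randn(100)
    model = OrthogonalMatchingPursuit(n_nonzero_coefs=2).fit(X, y)
    return model.coef_, model.predict(X[:5])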
def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True,
fit_intercept=True, normalize=True, max_iter=100):
"""Compute the residues on left-out data for a full LARS path
Parameters
    ----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied. If
False, they may be overwritten.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
        Maximum number of iterations to perform, and hence the maximum number
        of features to include. 100 by default.
Returns
-------
    residues : array, shape (max_features, n_samples)
        Residues of the prediction on the test data, with one row per
        number of active features along the path
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None,
precompute=False, copy_X=False,
return_path=True)
if coefs.ndim == 1:
coefs = coefs[:, np.newaxis]
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
return np.dot(coefs.T, X_test.T) - y_test
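# --- Editor's note (not part of the original scikit-learn module) ----------
# Hedged sketch of how the helper above is consumed: the returned array
# stacks one row of test residues per number of active atoms, so the mean of
# the squared residues along axis 1 gives a model-selection curve over
# sparsity levels.  ``_example_omp_path_residues`` is a hypothetical helper.
def _example_omp_path_residues():
    import numpy as np
    rng = np.random.RandomState(3)
    X = rng.randn(40, 10)
    y = np.dot(X, rng.randn(10))
    res = _omp_path_residues(X[:30], y[:30], X[30:], y[30:], max_iter=5)
    return (res ** 2).mean(axis=1)   # one mean squared error per sparsity level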
class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin):
"""Cross-validated Orthogonal Matching Pursuit model (OMP)
Parameters
----------
copy : bool, optional
Whether the design matrix X must be copied by the algorithm. A false
value is only helpful if X is already Fortran-ordered, otherwise a
copy is made anyway.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional
If False, the regressors X are assumed to be already normalized.
max_iter : integer, optional
        Maximum number of iterations to perform, and hence the maximum number
        of features to include. Defaults to 10% of ``n_features``, but at
        least 5 if available.
cv : cross-validation generator, optional
        see :mod:`sklearn.cross_validation`. If ``None`` is passed, defaults
        to a 5-fold strategy
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Read more in the :ref:`User Guide <omp>`.
Attributes
----------
intercept_ : float or array, shape (n_targets,)
Independent term in decision function.
coef_ : array, shape (n_features,) or (n_features, n_targets)
Parameter vector (w in the problem formulation).
n_nonzero_coefs_ : int
Estimated number of non-zero coefficients giving the best mean squared
error over the cross-validation folds.
n_iter_ : int or array-like
Number of active features across every target for the model refit with
the best hyperparameters got by cross-validating across all folds.
See also
--------
orthogonal_mp
orthogonal_mp_gram
lars_path
Lars
LassoLars
OrthogonalMatchingPursuit
LarsCV
LassoLarsCV
decomposition.sparse_encode
"""
def __init__(self, copy=True, fit_intercept=True, normalize=True,
max_iter=None, cv=None, n_jobs=1, verbose=False):
self.copy = copy
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.cv = cv
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data.
y : array-like, shape [n_samples]
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = check_X_y(X, y, y_numeric=True)
X = as_float_array(X, copy=False, force_all_finite=False)
cv = check_cv(self.cv, X, y, classifier=False)
max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1])
if not self.max_iter
else self.max_iter)
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_omp_path_residues)(
X[train], y[train], X[test], y[test], self.copy,
self.fit_intercept, self.normalize, max_iter)
for train, test in cv)
min_early_stop = min(fold.shape[0] for fold in cv_paths)
mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1)
for fold in cv_paths])
best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1
self.n_nonzero_coefs_ = best_n_nonzero_coefs
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs,
fit_intercept=self.fit_intercept,
normalize=self.normalize)
omp.fit(X, y)
self.coef_ = omp.coef_
self.intercept_ = omp.intercept_
self.n_iter_ = omp.n_iter_
return self
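# --- Editor's note (not part of the original scikit-learn module) ----------
# Hedged sketch of the selection rule used in ``fit`` above: every CV fold
# yields a per-sparsity mean-squared-error curve; the curves are truncated to
# the shortest path, averaged across folds, and ``argmin + 1`` gives the
# chosen number of non-zero coefficients.  The helper below only re-derives
# that arithmetic on toy numbers; it is not library API.
def _example_cv_selection_rule():
    import numpy as np
    fold_mse = np.array([[4.0, 2.0, 1.0, 1.5],    # fold 1: MSE per n_active
                         [3.5, 2.5, 0.8, 1.9]])   # fold 2: MSE per n_active
    return int(np.argmin(fold_mse.mean(axis=0)) + 1)   # -> 3 active atoms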
| bsd-3-clause |
pratapvardhan/pandas | pandas/util/_doctools.py | 5 | 6806 | import numpy as np
import pandas as pd
import pandas.compat as compat
class TablePlotter(object):
"""
    Lay out DataFrames in a vertical/horizontal layout for explanation.
Used in merging.rst
"""
def __init__(self, cell_width=0.37, cell_height=0.25, font_size=7.5):
self.cell_width = cell_width
self.cell_height = cell_height
self.font_size = font_size
def _shape(self, df):
"""
        Calculate the table shape, accounting for index and column levels.
"""
row, col = df.shape
return row + df.columns.nlevels, col + df.index.nlevels
def _get_cells(self, left, right, vertical):
"""
        Calculate the number of horizontal and vertical cells needed
        for the left and right data.
"""
if vertical:
# calculate required number of cells
vcells = max(sum(self._shape(l)[0] for l in left),
self._shape(right)[0])
hcells = (max(self._shape(l)[1] for l in left) +
self._shape(right)[1])
else:
vcells = max([self._shape(l)[0] for l in left] +
[self._shape(right)[0]])
hcells = sum([self._shape(l)[1] for l in left] +
[self._shape(right)[1]])
return hcells, vcells
def plot(self, left, right, labels=None, vertical=True):
"""
Plot left / right DataFrames in specified layout.
Parameters
----------
left : list of DataFrames before operation is applied
right : DataFrame of operation result
labels : list of str to be drawn as titles of left DataFrames
vertical : bool
If True, use vertical layout. If False, use horizontal layout.
"""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
if not isinstance(left, list):
left = [left]
left = [self._conv(l) for l in left]
right = self._conv(right)
hcells, vcells = self._get_cells(left, right, vertical)
if vertical:
figsize = self.cell_width * hcells, self.cell_height * vcells
else:
# include margin for titles
figsize = self.cell_width * hcells, self.cell_height * vcells
fig = plt.figure(figsize=figsize)
if vertical:
gs = gridspec.GridSpec(len(left), hcells)
# left
max_left_cols = max(self._shape(l)[1] for l in left)
max_left_rows = max(self._shape(l)[0] for l in left)
for i, (l, label) in enumerate(zip(left, labels)):
ax = fig.add_subplot(gs[i, 0:max_left_cols])
self._make_table(ax, l, title=label,
height=1.0 / max_left_rows)
# right
ax = plt.subplot(gs[:, max_left_cols:])
self._make_table(ax, right, title='Result', height=1.05 / vcells)
fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
else:
max_rows = max(self._shape(df)[0] for df in left + [right])
height = 1.0 / np.max(max_rows)
gs = gridspec.GridSpec(1, hcells)
# left
i = 0
for l, label in zip(left, labels):
sp = self._shape(l)
ax = fig.add_subplot(gs[0, i:i + sp[1]])
self._make_table(ax, l, title=label, height=height)
i += sp[1]
# right
ax = plt.subplot(gs[0, i:])
self._make_table(ax, right, title='Result', height=height)
fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
return fig
def _conv(self, data):
"""Convert each input to appropriate for table outplot"""
if isinstance(data, pd.Series):
if data.name is None:
data = data.to_frame(name='')
else:
data = data.to_frame()
data = data.fillna('NaN')
return data
def _insert_index(self, data):
# insert is destructive
data = data.copy()
idx_nlevels = data.index.nlevels
if idx_nlevels == 1:
data.insert(0, 'Index', data.index)
else:
for i in range(idx_nlevels):
data.insert(i, 'Index{0}'.format(i),
data.index._get_level_values(i))
col_nlevels = data.columns.nlevels
if col_nlevels > 1:
col = data.columns._get_level_values(0)
values = [data.columns._get_level_values(i).values
for i in range(1, col_nlevels)]
col_df = pd.DataFrame(values)
data.columns = col_df.columns
data = pd.concat([col_df, data])
data.columns = col
return data
def _make_table(self, ax, df, title, height=None):
if df is None:
ax.set_visible(False)
return
import pandas.plotting as plotting
idx_nlevels = df.index.nlevels
col_nlevels = df.columns.nlevels
        # must be converted here to get index levels for colorization
df = self._insert_index(df)
tb = plotting.table(ax, df, loc=9)
tb.set_fontsize(self.font_size)
if height is None:
height = 1.0 / (len(df) + 1)
props = tb.properties()
for (r, c), cell in compat.iteritems(props['celld']):
if c == -1:
cell.set_visible(False)
elif r < col_nlevels and c < idx_nlevels:
cell.set_visible(False)
elif r < col_nlevels or c < idx_nlevels:
cell.set_facecolor('#AAAAAA')
cell.set_height(height)
ax.set_title(title, size=self.font_size)
ax.axis('off')
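# --- Editor's note (not part of the original pandas module) -----------------
# Hedged sketch of the cell arithmetic used by ``_shape``/``_get_cells``:
# a table occupies its row count plus the number of column levels vertically,
# and its column count plus the number of index levels horizontally.
# ``_example_table_cells`` is a hypothetical demo helper.
def _example_table_cells():
    df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
    # 3 data rows + 1 column-header level = 4 vertical cells;
    # 2 data columns + 1 index level = 3 horizontal cells
    return TablePlotter()._shape(df)   # -> (4, 3)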
if __name__ == "__main__":
import matplotlib.pyplot as plt
p = TablePlotter()
df1 = pd.DataFrame({'A': [10, 11, 12],
'B': [20, 21, 22],
'C': [30, 31, 32]})
df2 = pd.DataFrame({'A': [10, 12],
'C': [30, 32]})
p.plot([df1, df2], pd.concat([df1, df2]),
labels=['df1', 'df2'], vertical=True)
plt.show()
df3 = pd.DataFrame({'X': [10, 12],
'Z': [30, 32]})
p.plot([df1, df3], pd.concat([df1, df3], axis=1),
labels=['df1', 'df2'], vertical=False)
plt.show()
idx = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B'), (1, 'C'),
(2, 'A'), (2, 'B'), (2, 'C')])
col = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B')])
df3 = pd.DataFrame({'v1': [1, 2, 3, 4, 5, 6],
'v2': [5, 6, 7, 8, 9, 10]},
index=idx)
df3.columns = col
p.plot(df3, df3, labels=['df3'])
plt.show()
| bsd-3-clause |
pratapvardhan/pandas | pandas/core/dtypes/generic.py | 5 | 3609 | """ define generic base classes for pandas objects """
# define abstract base classes to enable isinstance type checking on our
# objects
def create_pandas_abc_type(name, attr, comp):
@classmethod
def _check(cls, inst):
return getattr(inst, attr, '_typ') in comp
dct = dict(__instancecheck__=_check, __subclasscheck__=_check)
meta = type("ABCBase", (type, ), dct)
return meta(name, tuple(), dct)
ABCIndex = create_pandas_abc_type("ABCIndex", "_typ", ("index", ))
ABCInt64Index = create_pandas_abc_type("ABCInt64Index", "_typ",
("int64index", ))
ABCUInt64Index = create_pandas_abc_type("ABCUInt64Index", "_typ",
("uint64index", ))
ABCRangeIndex = create_pandas_abc_type("ABCRangeIndex", "_typ",
("rangeindex", ))
ABCFloat64Index = create_pandas_abc_type("ABCFloat64Index", "_typ",
("float64index", ))
ABCMultiIndex = create_pandas_abc_type("ABCMultiIndex", "_typ",
("multiindex", ))
ABCDatetimeIndex = create_pandas_abc_type("ABCDatetimeIndex", "_typ",
("datetimeindex", ))
ABCTimedeltaIndex = create_pandas_abc_type("ABCTimedeltaIndex", "_typ",
("timedeltaindex", ))
ABCPeriodIndex = create_pandas_abc_type("ABCPeriodIndex", "_typ",
("periodindex", ))
ABCCategoricalIndex = create_pandas_abc_type("ABCCategoricalIndex", "_typ",
("categoricalindex", ))
ABCIntervalIndex = create_pandas_abc_type("ABCIntervalIndex", "_typ",
("intervalindex", ))
ABCIndexClass = create_pandas_abc_type("ABCIndexClass", "_typ",
("index", "int64index", "rangeindex",
"float64index", "uint64index",
"multiindex", "datetimeindex",
"timedeltaindex", "periodindex",
"categoricalindex", "intervalindex"))
ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series", ))
ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe", ))
ABCSparseDataFrame = create_pandas_abc_type("ABCSparseDataFrame", "_subtyp",
("sparse_frame", ))
ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel",))
ABCSparseSeries = create_pandas_abc_type("ABCSparseSeries", "_subtyp",
('sparse_series',
'sparse_time_series'))
ABCSparseArray = create_pandas_abc_type("ABCSparseArray", "_subtyp",
('sparse_array', 'sparse_series'))
ABCCategorical = create_pandas_abc_type("ABCCategorical", "_typ",
("categorical"))
ABCPeriod = create_pandas_abc_type("ABCPeriod", "_typ", ("period", ))
ABCDateOffset = create_pandas_abc_type("ABCDateOffset", "_typ",
("dateoffset",))
ABCInterval = create_pandas_abc_type("ABCInterval", "_typ", ("interval", ))
ABCExtensionArray = create_pandas_abc_type("ABCExtensionArray", "_typ",
("extension", "categorical",))
class _ABCGeneric(type):
def __instancecheck__(cls, inst):
return hasattr(inst, "_data")
ABCGeneric = _ABCGeneric("ABCGeneric", tuple(), {})
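# --- Editor's usage sketch (not part of the original pandas module) ---------
# Hedged illustration of the pattern above: the generated meta-classes check
# the ``_typ`` attribute instead of real inheritance, so ``isinstance`` works
# without importing the concrete pandas classes, and duck-typed stand-ins
# carrying a matching ``_typ`` also pass.  ``_example_abc_check`` is a
# hypothetical demo helper.
def _example_abc_check():
    class FakeSeries(object):
        _typ = 'series'   # mimic the registration attribute pandas.Series uses
    return isinstance(FakeSeries(), ABCSeries)   # -> True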
| bsd-3-clause |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/tseries/tests/test_period.py | 2 | 119392 | """Test suite for Period handling.
Parts derived from scikits.timeseries code, original authors:
- Pierre Gerard-Marchant & Matt Knox
- pierregm_at_uga_dot_edu - mattknow_ca_at_hotmail_dot_com
"""
from datetime import datetime, date, timedelta
from numpy.ma.testutils import assert_equal
from pandas import Timestamp
from pandas.tseries.frequencies import MONTHS, DAYS, _period_code_map
from pandas.tseries.period import Period, PeriodIndex, period_range
from pandas.tseries.index import DatetimeIndex, date_range, Index
from pandas.tseries.tools import to_datetime
import pandas.tseries.period as period
import pandas.tseries.offsets as offsets
import pandas.core.datetools as datetools
import pandas as pd
import numpy as np
from numpy.random import randn
from pandas.compat import range, lrange, lmap, zip
from pandas import Series, TimeSeries, DataFrame, _np_version_under1p9
from pandas import tslib
from pandas.util.testing import(assert_series_equal, assert_almost_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas import compat
from numpy.testing import assert_array_equal
class TestPeriodProperties(tm.TestCase):
"Test properties such as year, month, weekday, etc...."
#
def test_quarterly_negative_ordinals(self):
p = Period(ordinal=-1, freq='Q-DEC')
self.assertEqual(p.year, 1969)
self.assertEqual(p.quarter, 4)
p = Period(ordinal=-2, freq='Q-DEC')
self.assertEqual(p.year, 1969)
self.assertEqual(p.quarter, 3)
p = Period(ordinal=-2, freq='M')
self.assertEqual(p.year, 1969)
self.assertEqual(p.month, 11)
def test_period_cons_quarterly(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'Q-%s' % month
exp = Period('1989Q3', freq=freq)
self.assertIn('1989Q3', str(exp))
stamp = exp.to_timestamp('D', how='end')
p = Period(stamp, freq=freq)
self.assertEqual(p, exp)
def test_period_cons_annual(self):
# bugs in scikits.timeseries
for month in MONTHS:
freq = 'A-%s' % month
exp = Period('1989', freq=freq)
stamp = exp.to_timestamp('D', how='end') + timedelta(days=30)
p = Period(stamp, freq=freq)
self.assertEqual(p, exp + 1)
def test_period_cons_weekly(self):
for num in range(10, 17):
daystr = '2011-02-%d' % num
for day in DAYS:
freq = 'W-%s' % day
result = Period(daystr, freq=freq)
expected = Period(daystr, freq='D').asfreq(freq)
self.assertEqual(result, expected)
def test_period_cons_nat(self):
p = Period('NaT', freq='M')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'M')
p = Period('nat', freq='W-SUN')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'W-SUN')
p = Period(tslib.iNaT, freq='D')
self.assertEqual(p.ordinal, tslib.iNaT)
self.assertEqual(p.freq, 'D')
self.assertRaises(ValueError, Period, 'NaT')
def test_timestamp_tz_arg(self):
import pytz
p = Period('1/1/2005', freq='M').to_timestamp(tz='Europe/Brussels')
self.assertEqual(p.tz,
pytz.timezone('Europe/Brussels').normalize(p).tzinfo)
def test_timestamp_tz_arg_dateutil(self):
import dateutil
from pandas.tslib import maybe_get_tz
p = Period('1/1/2005', freq='M').to_timestamp(tz=maybe_get_tz('dateutil/Europe/Brussels'))
self.assertEqual(p.tz, dateutil.zoneinfo.gettz('Europe/Brussels'))
def test_timestamp_tz_arg_dateutil_from_string(self):
import dateutil
p = Period('1/1/2005', freq='M').to_timestamp(tz='dateutil/Europe/Brussels')
self.assertEqual(p.tz, dateutil.zoneinfo.gettz('Europe/Brussels'))
def test_timestamp_nat_tz(self):
t = Period('NaT', freq='M').to_timestamp()
self.assertTrue(t is tslib.NaT)
t = Period('NaT', freq='M').to_timestamp(tz='Asia/Tokyo')
self.assertTrue(t is tslib.NaT)
def test_period_constructor(self):
i1 = Period('1/1/2005', freq='M')
i2 = Period('Jan 2005')
self.assertEqual(i1, i2)
i1 = Period('2005', freq='A')
i2 = Period('2005')
i3 = Period('2005', freq='a')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
i4 = Period('2005', freq='M')
i5 = Period('2005', freq='m')
self.assertRaises(ValueError, i1.__ne__, i4)
self.assertEqual(i4, i5)
i1 = Period.now('Q')
i2 = Period(datetime.now(), freq='Q')
i3 = Period.now('q')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
# Biz day construction, roll forward if non-weekday
i1 = Period('3/10/12', freq='B')
i2 = Period('3/10/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i2 = Period('3/11/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i2 = Period('3/12/12', freq='D')
self.assertEqual(i1, i2.asfreq('B'))
i3 = Period('3/10/12', freq='b')
self.assertEqual(i1, i3)
i1 = Period(year=2005, quarter=1, freq='Q')
i2 = Period('1/1/2005', freq='Q')
self.assertEqual(i1, i2)
i1 = Period(year=2005, quarter=3, freq='Q')
i2 = Period('9/1/2005', freq='Q')
self.assertEqual(i1, i2)
i1 = Period(year=2005, month=3, day=1, freq='D')
i2 = Period('3/1/2005', freq='D')
self.assertEqual(i1, i2)
i3 = Period(year=2005, month=3, day=1, freq='d')
self.assertEqual(i1, i3)
i1 = Period(year=2012, month=3, day=10, freq='B')
i2 = Period('3/12/12', freq='B')
self.assertEqual(i1, i2)
i1 = Period('2005Q1')
i2 = Period(year=2005, quarter=1, freq='Q')
i3 = Period('2005q1')
self.assertEqual(i1, i2)
self.assertEqual(i1, i3)
i1 = Period('05Q1')
self.assertEqual(i1, i2)
lower = Period('05q1')
self.assertEqual(i1, lower)
i1 = Period('1Q2005')
self.assertEqual(i1, i2)
lower = Period('1q2005')
self.assertEqual(i1, lower)
i1 = Period('1Q05')
self.assertEqual(i1, i2)
lower = Period('1q05')
self.assertEqual(i1, lower)
i1 = Period('4Q1984')
self.assertEqual(i1.year, 1984)
lower = Period('4q1984')
self.assertEqual(i1, lower)
i1 = Period('1982', freq='min')
i2 = Period('1982', freq='MIN')
self.assertEqual(i1, i2)
i2 = Period('1982', freq=('Min', 1))
self.assertEqual(i1, i2)
expected = Period('2007-01', freq='M')
i1 = Period('200701', freq='M')
self.assertEqual(i1, expected)
i1 = Period('200701', freq='M')
self.assertEqual(i1, expected)
i1 = Period(200701, freq='M')
self.assertEqual(i1, expected)
i1 = Period(ordinal=200701, freq='M')
self.assertEqual(i1.year, 18695)
i1 = Period(datetime(2007, 1, 1), freq='M')
i2 = Period('200701', freq='M')
self.assertEqual(i1, i2)
i1 = Period(date(2007, 1, 1), freq='M')
i2 = Period(datetime(2007, 1, 1), freq='M')
self.assertEqual(i1, i2)
i1 = Period('2007-01-01 09:00:00.001')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq='L')
self.assertEqual(i1, expected)
i1 = Period('2007-01-01 09:00:00.00101')
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq='U')
self.assertEqual(i1, expected)
self.assertRaises(ValueError, Period, ordinal=200701)
self.assertRaises(ValueError, Period, '2007-1-1', freq='X')
def test_freq_str(self):
i1 = Period('1982', freq='Min')
self.assertNotEqual(i1.freq[0], '1')
def test_repr(self):
p = Period('Jan-2000')
self.assertIn('2000-01', repr(p))
p = Period('2000-12-15')
self.assertIn('2000-12-15', repr(p))
def test_repr_nat(self):
p = Period('nat', freq='M')
self.assertIn(repr(tslib.NaT), repr(p))
def test_millisecond_repr(self):
p = Period('2000-01-01 12:15:02.123')
self.assertEqual("Period('2000-01-01 12:15:02.123', 'L')", repr(p))
def test_microsecond_repr(self):
p = Period('2000-01-01 12:15:02.123567')
self.assertEqual("Period('2000-01-01 12:15:02.123567', 'U')", repr(p))
def test_strftime(self):
p = Period('2000-1-1 12:34:12', freq='S')
res = p.strftime('%Y-%m-%d %H:%M:%S')
self.assertEqual(res, '2000-01-01 12:34:12')
tm.assert_isinstance(res, compat.text_type) # GH3363
def test_sub_delta(self):
left, right = Period('2011', freq='A'), Period('2007', freq='A')
result = left - right
self.assertEqual(result, 4)
self.assertRaises(ValueError, left.__sub__,
Period('2007-01', freq='M'))
def test_to_timestamp(self):
p = Period('1982', freq='A')
start_ts = p.to_timestamp(how='S')
aliases = ['s', 'StarT', 'BEGIn']
for a in aliases:
self.assertEqual(start_ts, p.to_timestamp('D', how=a))
end_ts = p.to_timestamp(how='E')
aliases = ['e', 'end', 'FINIsH']
for a in aliases:
self.assertEqual(end_ts, p.to_timestamp('D', how=a))
from_lst = ['A', 'Q', 'M', 'W', 'B',
'D', 'H', 'Min', 'S']
def _ex(p):
return Timestamp((p + 1).start_time.value - 1)
for i, fcode in enumerate(from_lst):
p = Period('1982', freq=fcode)
result = p.to_timestamp().to_period(fcode)
self.assertEqual(result, p)
self.assertEqual(p.start_time, p.to_timestamp(how='S'))
self.assertEqual(p.end_time, _ex(p))
# Frequency other than daily
p = Period('1985', freq='A')
result = p.to_timestamp('H', how='end')
expected = datetime(1985, 12, 31, 23)
self.assertEqual(result, expected)
result = p.to_timestamp('T', how='end')
expected = datetime(1985, 12, 31, 23, 59)
self.assertEqual(result, expected)
result = p.to_timestamp(how='end')
expected = datetime(1985, 12, 31)
self.assertEqual(result, expected)
expected = datetime(1985, 1, 1)
result = p.to_timestamp('H', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('T', how='start')
self.assertEqual(result, expected)
result = p.to_timestamp('S', how='start')
self.assertEqual(result, expected)
assertRaisesRegexp(ValueError, 'Only mult == 1', p.to_timestamp, '5t')
p = Period('NaT', freq='W')
self.assertTrue(p.to_timestamp() is tslib.NaT)
def test_start_time(self):
freq_lst = ['A', 'Q', 'M', 'D', 'H', 'T', 'S']
xp = datetime(2012, 1, 1)
for f in freq_lst:
p = Period('2012', freq=f)
self.assertEqual(p.start_time, xp)
self.assertEqual(Period('2012', freq='B').start_time,
datetime(2012, 1, 2))
self.assertEqual(Period('2012', freq='W').start_time,
datetime(2011, 12, 26))
p = Period('NaT', freq='W')
self.assertTrue(p.start_time is tslib.NaT)
def test_end_time(self):
p = Period('2012', freq='A')
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
xp = _ex(2013, 1, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='Q')
xp = _ex(2012, 4, 1)
self.assertEqual(xp, p.end_time)
p = Period('2012', freq='M')
xp = _ex(2012, 2, 1)
self.assertEqual(xp, p.end_time)
xp = _ex(2012, 1, 2)
p = Period('2012', freq='D')
self.assertEqual(p.end_time, xp)
xp = _ex(2012, 1, 1, 1)
p = Period('2012', freq='H')
self.assertEqual(p.end_time, xp)
xp = _ex(2012, 1, 3)
self.assertEqual(Period('2012', freq='B').end_time, xp)
xp = _ex(2012, 1, 2)
self.assertEqual(Period('2012', freq='W').end_time, xp)
p = Period('NaT', freq='W')
self.assertTrue(p.end_time is tslib.NaT)
def test_anchor_week_end_time(self):
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
p = Period('2013-1-1', 'W-SAT')
xp = _ex(2013, 1, 6)
self.assertEqual(p.end_time, xp)
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq='A', year=2007)
assert_equal(a_date.year, 2007)
def test_properties_quarterly(self):
        # Test properties on Periods with quarterly frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert_equal((qd + x).qyear, 2007)
assert_equal((qd + x).quarter, x + 1)
def test_properties_monthly(self):
        # Test properties on Periods with monthly frequency.
m_date = Period(freq='M', year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert_equal(m_ival_x.year, 2007)
if 1 <= x + 1 <= 3:
assert_equal(m_ival_x.quarter, 1)
elif 4 <= x + 1 <= 6:
assert_equal(m_ival_x.quarter, 2)
elif 7 <= x + 1 <= 9:
assert_equal(m_ival_x.quarter, 3)
elif 10 <= x + 1 <= 12:
assert_equal(m_ival_x.quarter, 4)
assert_equal(m_ival_x.month, x + 1)
def test_properties_weekly(self):
        # Test properties on Periods with weekly frequency.
w_date = Period(freq='WK', year=2007, month=1, day=7)
#
assert_equal(w_date.year, 2007)
assert_equal(w_date.quarter, 1)
assert_equal(w_date.month, 1)
assert_equal(w_date.week, 1)
assert_equal((w_date - 1).week, 52)
assert_equal(w_date.days_in_month, 31)
assert_equal(Period(freq='WK', year=2012, month=2, day=1).days_in_month, 29)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq='B', year=2007, month=1, day=1)
#
assert_equal(b_date.year, 2007)
assert_equal(b_date.quarter, 1)
assert_equal(b_date.month, 1)
assert_equal(b_date.day, 1)
assert_equal(b_date.weekday, 0)
assert_equal(b_date.dayofyear, 1)
assert_equal(b_date.days_in_month, 31)
assert_equal(Period(freq='B', year=2012, month=2, day=1).days_in_month, 29)
#
d_date = Period(freq='D', year=2007, month=1, day=1)
#
assert_equal(d_date.year, 2007)
assert_equal(d_date.quarter, 1)
assert_equal(d_date.month, 1)
assert_equal(d_date.day, 1)
assert_equal(d_date.weekday, 0)
assert_equal(d_date.dayofyear, 1)
assert_equal(d_date.days_in_month, 31)
assert_equal(Period(freq='D', year=2012, month=2,
day=1).days_in_month, 29)
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date = Period(freq='H', year=2007, month=1, day=1, hour=0)
#
assert_equal(h_date.year, 2007)
assert_equal(h_date.quarter, 1)
assert_equal(h_date.month, 1)
assert_equal(h_date.day, 1)
assert_equal(h_date.weekday, 0)
assert_equal(h_date.dayofyear, 1)
assert_equal(h_date.hour, 0)
assert_equal(h_date.days_in_month, 31)
assert_equal(Period(freq='H', year=2012, month=2, day=1,
hour=0).days_in_month, 29)
#
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq='Min', year=2007, month=1, day=1, hour=0,
minute=0)
#
assert_equal(t_date.quarter, 1)
assert_equal(t_date.month, 1)
assert_equal(t_date.day, 1)
assert_equal(t_date.weekday, 0)
assert_equal(t_date.dayofyear, 1)
assert_equal(t_date.hour, 0)
assert_equal(t_date.minute, 0)
assert_equal(t_date.days_in_month, 31)
        assert_equal(Period(freq='Min', year=2012, month=2, day=1, hour=0,
                            minute=0).days_in_month, 29)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
        s_date = Period(freq='S', year=2007, month=1, day=1,
                        hour=0, minute=0, second=0)
#
assert_equal(s_date.year, 2007)
assert_equal(s_date.quarter, 1)
assert_equal(s_date.month, 1)
assert_equal(s_date.day, 1)
assert_equal(s_date.weekday, 0)
assert_equal(s_date.dayofyear, 1)
assert_equal(s_date.hour, 0)
assert_equal(s_date.minute, 0)
assert_equal(s_date.second, 0)
assert_equal(s_date.days_in_month, 31)
        assert_equal(Period(freq='S', year=2012, month=2, day=1, hour=0,
                            minute=0, second=0).days_in_month, 29)
def test_properties_nat(self):
p_nat = Period('NaT', freq='M')
t_nat = pd.Timestamp('NaT')
# confirm Period('NaT') work identical with Timestamp('NaT')
for f in ['year', 'month', 'day', 'hour', 'minute', 'second',
'week', 'dayofyear', 'quarter', 'days_in_month']:
self.assertTrue(np.isnan(getattr(p_nat, f)))
self.assertTrue(np.isnan(getattr(t_nat, f)))
for f in ['weekofyear', 'dayofweek', 'weekday', 'qyear']:
self.assertTrue(np.isnan(getattr(p_nat, f)))
def test_pnow(self):
dt = datetime.now()
val = period.pnow('D')
exp = Period(dt, freq='D')
self.assertEqual(val, exp)
def test_constructor_corner(self):
self.assertRaises(ValueError, Period, year=2007, month=1,
freq='2M')
self.assertRaises(ValueError, Period, datetime.now())
self.assertRaises(ValueError, Period, datetime.now().date())
self.assertRaises(ValueError, Period, 1.6, freq='D')
self.assertRaises(ValueError, Period, ordinal=1.6, freq='D')
self.assertRaises(ValueError, Period, ordinal=2, value=1, freq='D')
self.assertRaises(ValueError, Period)
self.assertRaises(ValueError, Period, month=1)
p = Period('2007-01-01', freq='D')
result = Period(p, freq='A')
exp = Period('2007', freq='A')
self.assertEqual(result, exp)
def test_constructor_infer_freq(self):
p = Period('2007-01-01')
self.assertEqual(p.freq, 'D')
p = Period('2007-01-01 07')
self.assertEqual(p.freq, 'H')
p = Period('2007-01-01 07:10')
self.assertEqual(p.freq, 'T')
p = Period('2007-01-01 07:10:15')
self.assertEqual(p.freq, 'S')
p = Period('2007-01-01 07:10:15.123')
self.assertEqual(p.freq, 'L')
p = Period('2007-01-01 07:10:15.123000')
self.assertEqual(p.freq, 'L')
p = Period('2007-01-01 07:10:15.123400')
self.assertEqual(p.freq, 'U')
def test_asfreq_MS(self):
initial = Period("2013")
self.assertEqual(initial.asfreq(freq="M", how="S"), Period('2013-01', 'M'))
self.assertRaises(ValueError, initial.asfreq, freq="MS", how="S")
tm.assertRaisesRegexp(ValueError, "Unknown freqstr: MS", pd.Period, '2013-01', 'MS')
self.assertTrue(_period_code_map.get("MS") is None)
def noWrap(item):
return item
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
self.assertRaises(ValueError, val.asfreq, '5t')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='WK', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
assert_equal(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
assert_equal(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
assert_equal(ival_A.asfreq('M', 's'), ival_A_to_M_start)
assert_equal(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
assert_equal(ival_A.asfreq('WK', 'S'), ival_A_to_W_start)
assert_equal(ival_A.asfreq('WK', 'E'), ival_A_to_W_end)
assert_equal(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
assert_equal(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
assert_equal(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
assert_equal(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
assert_equal(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
assert_equal(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
assert_equal(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
assert_equal(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
assert_equal(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
assert_equal(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
assert_equal(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
assert_equal(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
assert_equal(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
assert_equal(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
assert_equal(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
assert_equal(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
assert_equal(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='WK', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
assert_equal(ival_Q.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
assert_equal(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
assert_equal(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
assert_equal(ival_Q.asfreq('WK', 'S'), ival_Q_to_W_start)
assert_equal(ival_Q.asfreq('WK', 'E'), ival_Q_to_W_end)
assert_equal(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
assert_equal(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
assert_equal(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
assert_equal(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
assert_equal(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
assert_equal(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
assert_equal(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
assert_equal(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
assert_equal(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
assert_equal(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
assert_equal(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
assert_equal(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
assert_equal(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
assert_equal(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
assert_equal(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='WK', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='WK', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
assert_equal(ival_M.asfreq('A'), ival_M_to_A)
assert_equal(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
assert_equal(ival_M.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
assert_equal(ival_M.asfreq('WK', 'S'), ival_M_to_W_start)
assert_equal(ival_M.asfreq('WK', 'E'), ival_M_to_W_end)
assert_equal(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
assert_equal(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
assert_equal(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
assert_equal(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
assert_equal(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
assert_equal(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
assert_equal(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
assert_equal(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
assert_equal(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
assert_equal(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
assert_equal(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='WK', year=2007, month=1, day=1)
ival_WSUN = Period(freq='WK', year=2007, month=1, day=7)
ival_WSAT = Period(freq='WK-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='WK-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='WK-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='WK-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='WK-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='WK-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq='D', year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq='D', year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq='D', year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq='D', year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq='D', year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq='D', year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq='D', year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq='D', year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq='D', year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq='D', year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq='D', year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq='WK', year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq='WK', year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq='WK', year=2007, month=1, day=31)
ival_W_to_A = Period(freq='A', year=2007)
ival_W_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_W_to_M = Period(freq='M', year=2007, month=1)
if Period(freq='D', year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq='A', year=2007)
else:
ival_W_to_A_end_of_year = Period(freq='A', year=2008)
if Period(freq='D', year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq='Q', year=2007,
quarter=2)
if Period(freq='D', year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq='M', year=2007, month=2)
ival_W_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq='B', year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_W_to_H_end = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_W_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_W_to_T_end = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_W_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_W_to_S_end = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
assert_equal(ival_W.asfreq('A'), ival_W_to_A)
assert_equal(ival_W_end_of_year.asfreq('A'),
ival_W_to_A_end_of_year)
assert_equal(ival_W.asfreq('Q'), ival_W_to_Q)
assert_equal(ival_W_end_of_quarter.asfreq('Q'),
ival_W_to_Q_end_of_quarter)
assert_equal(ival_W.asfreq('M'), ival_W_to_M)
assert_equal(ival_W_end_of_month.asfreq('M'),
ival_W_to_M_end_of_month)
assert_equal(ival_W.asfreq('B', 'S'), ival_W_to_B_start)
assert_equal(ival_W.asfreq('B', 'E'), ival_W_to_B_end)
assert_equal(ival_W.asfreq('D', 'S'), ival_W_to_D_start)
assert_equal(ival_W.asfreq('D', 'E'), ival_W_to_D_end)
assert_equal(ival_WSUN.asfreq('D', 'S'), ival_WSUN_to_D_start)
assert_equal(ival_WSUN.asfreq('D', 'E'), ival_WSUN_to_D_end)
assert_equal(ival_WSAT.asfreq('D', 'S'), ival_WSAT_to_D_start)
assert_equal(ival_WSAT.asfreq('D', 'E'), ival_WSAT_to_D_end)
assert_equal(ival_WFRI.asfreq('D', 'S'), ival_WFRI_to_D_start)
assert_equal(ival_WFRI.asfreq('D', 'E'), ival_WFRI_to_D_end)
assert_equal(ival_WTHU.asfreq('D', 'S'), ival_WTHU_to_D_start)
assert_equal(ival_WTHU.asfreq('D', 'E'), ival_WTHU_to_D_end)
assert_equal(ival_WWED.asfreq('D', 'S'), ival_WWED_to_D_start)
assert_equal(ival_WWED.asfreq('D', 'E'), ival_WWED_to_D_end)
assert_equal(ival_WTUE.asfreq('D', 'S'), ival_WTUE_to_D_start)
assert_equal(ival_WTUE.asfreq('D', 'E'), ival_WTUE_to_D_end)
assert_equal(ival_WMON.asfreq('D', 'S'), ival_WMON_to_D_start)
assert_equal(ival_WMON.asfreq('D', 'E'), ival_WMON_to_D_end)
assert_equal(ival_W.asfreq('H', 'S'), ival_W_to_H_start)
assert_equal(ival_W.asfreq('H', 'E'), ival_W_to_H_end)
assert_equal(ival_W.asfreq('Min', 'S'), ival_W_to_T_start)
assert_equal(ival_W.asfreq('Min', 'E'), ival_W_to_T_end)
assert_equal(ival_W.asfreq('S', 'S'), ival_W_to_S_start)
assert_equal(ival_W.asfreq('S', 'E'), ival_W_to_S_end)
assert_equal(ival_W.asfreq('WK'), ival_W)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
ival_B = Period(freq='B', year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq='B', year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq='B', year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq='B', year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq='B', year=2007, month=1, day=5)
ival_B_to_A = Period(freq='A', year=2007)
ival_B_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_B_to_M = Period(freq='M', year=2007, month=1)
ival_B_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_B_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_B_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_B_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_B_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_B_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_B_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_B.asfreq('A'), ival_B_to_A)
assert_equal(ival_B_end_of_year.asfreq('A'), ival_B_to_A)
assert_equal(ival_B.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B_end_of_quarter.asfreq('Q'), ival_B_to_Q)
assert_equal(ival_B.asfreq('M'), ival_B_to_M)
assert_equal(ival_B_end_of_month.asfreq('M'), ival_B_to_M)
assert_equal(ival_B.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B_end_of_week.asfreq('WK'), ival_B_to_W)
assert_equal(ival_B.asfreq('D'), ival_B_to_D)
assert_equal(ival_B.asfreq('H', 'S'), ival_B_to_H_start)
assert_equal(ival_B.asfreq('H', 'E'), ival_B_to_H_end)
assert_equal(ival_B.asfreq('Min', 'S'), ival_B_to_T_start)
assert_equal(ival_B.asfreq('Min', 'E'), ival_B_to_T_end)
assert_equal(ival_B.asfreq('S', 'S'), ival_B_to_S_start)
assert_equal(ival_B.asfreq('S', 'E'), ival_B_to_S_end)
assert_equal(ival_B.asfreq('B'), ival_B)
def test_conv_daily(self):
# frequency conversion tests: from Business Frequency"
ival_D = Period(freq='D', year=2007, month=1, day=1)
ival_D_end_of_year = Period(freq='D', year=2007, month=12, day=31)
ival_D_end_of_quarter = Period(freq='D', year=2007, month=3, day=31)
ival_D_end_of_month = Period(freq='D', year=2007, month=1, day=31)
ival_D_end_of_week = Period(freq='D', year=2007, month=1, day=7)
ival_D_friday = Period(freq='D', year=2007, month=1, day=5)
ival_D_saturday = Period(freq='D', year=2007, month=1, day=6)
ival_D_sunday = Period(freq='D', year=2007, month=1, day=7)
ival_D_monday = Period(freq='D', year=2007, month=1, day=8)
ival_B_friday = Period(freq='B', year=2007, month=1, day=5)
ival_B_monday = Period(freq='B', year=2007, month=1, day=8)
ival_D_to_A = Period(freq='A', year=2007)
ival_Deoq_to_AJAN = Period(freq='A-JAN', year=2008)
ival_Deoq_to_AJUN = Period(freq='A-JUN', year=2007)
ival_Deoq_to_ADEC = Period(freq='A-DEC', year=2007)
ival_D_to_QEJAN = Period(freq="Q-JAN", year=2007, quarter=4)
ival_D_to_QEJUN = Period(freq="Q-JUN", year=2007, quarter=3)
ival_D_to_QEDEC = Period(freq="Q-DEC", year=2007, quarter=1)
ival_D_to_M = Period(freq='M', year=2007, month=1)
ival_D_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_D_to_H_start = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_D_to_H_end = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_D_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_D_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_D_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_D_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
assert_equal(ival_D.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('A-JAN'),
ival_Deoq_to_AJAN)
assert_equal(ival_D_end_of_quarter.asfreq('A-JUN'),
ival_Deoq_to_AJUN)
assert_equal(ival_D_end_of_quarter.asfreq('A-DEC'),
ival_Deoq_to_ADEC)
assert_equal(ival_D_end_of_year.asfreq('A'), ival_D_to_A)
assert_equal(ival_D_end_of_quarter.asfreq('Q'), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq("Q-JAN"), ival_D_to_QEJAN)
assert_equal(ival_D.asfreq("Q-JUN"), ival_D_to_QEJUN)
assert_equal(ival_D.asfreq("Q-DEC"), ival_D_to_QEDEC)
assert_equal(ival_D.asfreq('M'), ival_D_to_M)
assert_equal(ival_D_end_of_month.asfreq('M'), ival_D_to_M)
assert_equal(ival_D.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_end_of_week.asfreq('WK'), ival_D_to_W)
assert_equal(ival_D_friday.asfreq('B'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_saturday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D_sunday.asfreq('B', 'S'), ival_B_friday)
assert_equal(ival_D_sunday.asfreq('B', 'E'), ival_B_monday)
assert_equal(ival_D.asfreq('H', 'S'), ival_D_to_H_start)
assert_equal(ival_D.asfreq('H', 'E'), ival_D_to_H_end)
assert_equal(ival_D.asfreq('Min', 'S'), ival_D_to_T_start)
assert_equal(ival_D.asfreq('Min', 'E'), ival_D_to_T_end)
assert_equal(ival_D.asfreq('S', 'S'), ival_D_to_S_start)
assert_equal(ival_D.asfreq('S', 'E'), ival_D_to_S_end)
assert_equal(ival_D.asfreq('D'), ival_D)
def test_conv_hourly(self):
# frequency conversion tests: from Hourly Frequency"
ival_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_H_end_of_year = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_H_end_of_quarter = Period(freq='H', year=2007, month=3, day=31,
hour=23)
ival_H_end_of_month = Period(freq='H', year=2007, month=1, day=31,
hour=23)
ival_H_end_of_week = Period(freq='H', year=2007, month=1, day=7,
hour=23)
ival_H_end_of_day = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_end_of_bus = Period(freq='H', year=2007, month=1, day=1,
hour=23)
ival_H_to_A = Period(freq='A', year=2007)
ival_H_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_H_to_M = Period(freq='M', year=2007, month=1)
ival_H_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_H_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_H_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_H_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_H_to_T_end = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_H_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_H_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
assert_equal(ival_H.asfreq('A'), ival_H_to_A)
assert_equal(ival_H_end_of_year.asfreq('A'), ival_H_to_A)
assert_equal(ival_H.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H_end_of_quarter.asfreq('Q'), ival_H_to_Q)
assert_equal(ival_H.asfreq('M'), ival_H_to_M)
assert_equal(ival_H_end_of_month.asfreq('M'), ival_H_to_M)
assert_equal(ival_H.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H_end_of_week.asfreq('WK'), ival_H_to_W)
assert_equal(ival_H.asfreq('D'), ival_H_to_D)
assert_equal(ival_H_end_of_day.asfreq('D'), ival_H_to_D)
assert_equal(ival_H.asfreq('B'), ival_H_to_B)
assert_equal(ival_H_end_of_bus.asfreq('B'), ival_H_to_B)
assert_equal(ival_H.asfreq('Min', 'S'), ival_H_to_T_start)
assert_equal(ival_H.asfreq('Min', 'E'), ival_H_to_T_end)
assert_equal(ival_H.asfreq('S', 'S'), ival_H_to_S_start)
assert_equal(ival_H.asfreq('S', 'E'), ival_H_to_S_end)
assert_equal(ival_H.asfreq('H'), ival_H)
def test_conv_minutely(self):
# frequency conversion tests: from Minutely Frequency"
ival_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_T_end_of_year = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_T_end_of_quarter = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_T_end_of_month = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_T_end_of_week = Period(freq='Min', year=2007, month=1, day=7,
hour=23, minute=59)
ival_T_end_of_day = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_bus = Period(freq='Min', year=2007, month=1, day=1,
hour=23, minute=59)
ival_T_end_of_hour = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=59)
ival_T_to_A = Period(freq='A', year=2007)
ival_T_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_T_to_M = Period(freq='M', year=2007, month=1)
ival_T_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_T_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_T_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_T_to_H = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_T_to_S_start = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_T_to_S_end = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
assert_equal(ival_T.asfreq('A'), ival_T_to_A)
assert_equal(ival_T_end_of_year.asfreq('A'), ival_T_to_A)
assert_equal(ival_T.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T_end_of_quarter.asfreq('Q'), ival_T_to_Q)
assert_equal(ival_T.asfreq('M'), ival_T_to_M)
assert_equal(ival_T_end_of_month.asfreq('M'), ival_T_to_M)
assert_equal(ival_T.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T_end_of_week.asfreq('WK'), ival_T_to_W)
assert_equal(ival_T.asfreq('D'), ival_T_to_D)
assert_equal(ival_T_end_of_day.asfreq('D'), ival_T_to_D)
assert_equal(ival_T.asfreq('B'), ival_T_to_B)
assert_equal(ival_T_end_of_bus.asfreq('B'), ival_T_to_B)
assert_equal(ival_T.asfreq('H'), ival_T_to_H)
assert_equal(ival_T_end_of_hour.asfreq('H'), ival_T_to_H)
assert_equal(ival_T.asfreq('S', 'S'), ival_T_to_S_start)
assert_equal(ival_T.asfreq('S', 'E'), ival_T_to_S_end)
assert_equal(ival_T.asfreq('Min'), ival_T)
def test_conv_secondly(self):
        # frequency conversion tests: from Secondly Frequency
ival_S = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=0)
ival_S_end_of_year = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_quarter = Period(freq='S', year=2007, month=3, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_month = Period(freq='S', year=2007, month=1, day=31,
hour=23, minute=59, second=59)
ival_S_end_of_week = Period(freq='S', year=2007, month=1, day=7,
hour=23, minute=59, second=59)
ival_S_end_of_day = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_bus = Period(freq='S', year=2007, month=1, day=1,
hour=23, minute=59, second=59)
ival_S_end_of_hour = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=59, second=59)
ival_S_end_of_minute = Period(freq='S', year=2007, month=1, day=1,
hour=0, minute=0, second=59)
ival_S_to_A = Period(freq='A', year=2007)
ival_S_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_S_to_M = Period(freq='M', year=2007, month=1)
ival_S_to_W = Period(freq='WK', year=2007, month=1, day=7)
ival_S_to_D = Period(freq='D', year=2007, month=1, day=1)
ival_S_to_B = Period(freq='B', year=2007, month=1, day=1)
ival_S_to_H = Period(freq='H', year=2007, month=1, day=1,
hour=0)
ival_S_to_T = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
assert_equal(ival_S.asfreq('A'), ival_S_to_A)
assert_equal(ival_S_end_of_year.asfreq('A'), ival_S_to_A)
assert_equal(ival_S.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S_end_of_quarter.asfreq('Q'), ival_S_to_Q)
assert_equal(ival_S.asfreq('M'), ival_S_to_M)
assert_equal(ival_S_end_of_month.asfreq('M'), ival_S_to_M)
assert_equal(ival_S.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S_end_of_week.asfreq('WK'), ival_S_to_W)
assert_equal(ival_S.asfreq('D'), ival_S_to_D)
assert_equal(ival_S_end_of_day.asfreq('D'), ival_S_to_D)
assert_equal(ival_S.asfreq('B'), ival_S_to_B)
assert_equal(ival_S_end_of_bus.asfreq('B'), ival_S_to_B)
assert_equal(ival_S.asfreq('H'), ival_S_to_H)
assert_equal(ival_S_end_of_hour.asfreq('H'), ival_S_to_H)
assert_equal(ival_S.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S_end_of_minute.asfreq('Min'), ival_S_to_T)
assert_equal(ival_S.asfreq('S'), ival_S)
def test_asfreq_nat(self):
p = Period('NaT', freq='A')
result = p.asfreq('M')
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
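# The TestPeriodIndex class below exercises PeriodIndex construction, indexing
# and slicing, frequency conversion, and interaction with Series and DataFrame.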
class TestPeriodIndex(tm.TestCase):
def setUp(self):
pass
def test_hash_error(self):
index = period_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_make_time_series(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index)
tm.assert_isinstance(series, TimeSeries)
def test_astype(self):
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
self.assert_numpy_array_equal(result, idx.values)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period('4/2/2012', freq='B')
index = PeriodIndex(start=p, periods=10)
expected = PeriodIndex(start='4/2/2012', periods=10, freq='B')
self.assertTrue(index.equals(expected))
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq='Q-DEC')
expected = period_range('1990Q3', '2009Q2', freq='Q-DEC')
self.assertTrue(index.equals(expected))
self.assertRaises(
ValueError, PeriodIndex, year=years, quarter=quarters,
freq='2Q-DEC')
index = PeriodIndex(year=years, quarter=quarters)
self.assertTrue(index.equals(expected))
years = [2007, 2007, 2007]
months = [1, 2]
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='2M')
self.assertRaises(ValueError, PeriodIndex, year=years, month=months,
freq='M', start=Period('2007-01', freq='M'))
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq='M')
exp = period_range('2007-01', periods=3, freq='M')
self.assertTrue(idx.equals(exp))
def test_constructor_U(self):
# U was used as undefined period
self.assertRaises(ValueError, period_range, '2007-1-1', periods=500,
freq='X')
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(lrange(1, 5), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
self.assert_numpy_array_equal(pindex.year, years)
self.assert_numpy_array_equal(pindex.quarter, quarters)
def test_constructor_invalid_quarters(self):
self.assertRaises(ValueError, PeriodIndex, year=lrange(2000, 2004),
quarter=lrange(4), freq='Q-DEC')
def test_constructor_corner(self):
self.assertRaises(ValueError, PeriodIndex, periods=10, freq='A')
start = Period('2007', freq='A-JUN')
end = Period('2010', freq='A-DEC')
self.assertRaises(ValueError, PeriodIndex, start=start, end=end)
self.assertRaises(ValueError, PeriodIndex, start=start)
self.assertRaises(ValueError, PeriodIndex, end=end)
result = period_range('2007-01', periods=10.5, freq='M')
exp = period_range('2007-01', periods=10, freq='M')
self.assertTrue(result.equals(exp))
def test_constructor_fromarraylike(self):
idx = period_range('2007-01', periods=20, freq='M')
self.assertRaises(ValueError, PeriodIndex, idx.values)
self.assertRaises(ValueError, PeriodIndex, list(idx.values))
self.assertRaises(ValueError, PeriodIndex,
data=Period('2007', freq='A'))
result = PeriodIndex(iter(idx))
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx)
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx, freq='M')
self.assertTrue(result.equals(idx))
result = PeriodIndex(idx, freq='D')
exp = idx.asfreq('D', 'e')
self.assertTrue(result.equals(exp))
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype('M8[us]'))
self.assertRaises(ValueError, PeriodIndex, vals, freq='D')
def test_constructor_simple_new(self):
idx = period_range('2007-01', name='p', periods=20, freq='M')
result = idx._simple_new(idx, 'p', freq=idx.freq)
self.assertTrue(result.equals(idx))
result = idx._simple_new(idx.astype('i8'), 'p', freq=idx.freq)
self.assertTrue(result.equals(idx))
def test_constructor_nat(self):
self.assertRaises(
ValueError, period_range, start='NaT', end='2011-01-01', freq='M')
self.assertRaises(
ValueError, period_range, start='2011-01-01', end='NaT', freq='M')
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ['%dQ%d' % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
def test_is_(self):
create_index = lambda: PeriodIndex(freq='A', start='1/1/2001',
end='12/1/2009')
index = create_index()
self.assertEqual(index.is_(index), True)
self.assertEqual(index.is_(create_index()), False)
self.assertEqual(index.is_(index.view()), True)
self.assertEqual(index.is_(index.view().view().view().view().view()), True)
self.assertEqual(index.view().is_(index), True)
ind2 = index.view()
index.name = "Apple"
self.assertEqual(ind2.is_(index), True)
self.assertEqual(index.is_(index[:]), False)
self.assertEqual(index.is_(index.asfreq('M')), False)
self.assertEqual(index.is_(index.asfreq('A')), False)
self.assertEqual(index.is_(index - 2), False)
self.assertEqual(index.is_(index - 0), False)
def test_comp_period(self):
idx = period_range('2007-01', periods=20, freq='M')
result = idx < idx[10]
exp = idx.values < idx.values[10]
self.assert_numpy_array_equal(result, exp)
def test_getitem_ndim2(self):
idx = period_range('2007-01', periods=3, freq='M')
result = idx[:, None]
# MPL kludge
tm.assert_isinstance(result, PeriodIndex)
def test_getitem_partial(self):
rng = period_range('2007-01', periods=50, freq='M')
ts = Series(np.random.randn(len(rng)), rng)
self.assertRaises(KeyError, ts.__getitem__, '2006')
result = ts['2008']
self.assertTrue((result.index.year == 2008).all())
result = ts['2008':'2009']
self.assertEqual(len(result), 24)
result = ts['2008-1':'2009-12']
self.assertEqual(len(result), 24)
result = ts['2008Q1':'2009Q4']
self.assertEqual(len(result), 24)
result = ts[:'2009']
self.assertEqual(len(result), 36)
result = ts['2009':]
self.assertEqual(len(result), 50 - 24)
exp = result
result = ts[24:]
assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
self.assertRaisesRegexp(
KeyError, "left slice bound for non-unique label: '2008'",
ts.__getitem__, slice('2008', '2009'))
def test_getitem_datetime(self):
rng = period_range(start='2012-01-01', periods=10, freq='W-MON')
ts = Series(lrange(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
assert_series_equal(rs, ts)
def test_slice_with_negative_step(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
assert_series_equal(ts[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc])
assert_series_equal(ts.ix[l_slc], ts.iloc[i_slc])
assert_slices_equivalent(SLC[Period('2014-10')::-1], SLC[9::-1])
assert_slices_equivalent(SLC['2014-10'::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:Period('2014-10'):-1], SLC[:8:-1])
assert_slices_equivalent(SLC[:'2014-10':-1], SLC[:8:-1])
assert_slices_equivalent(SLC['2015-02':'2014-10':-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):Period('2014-10'):-1], SLC[13:8:-1])
assert_slices_equivalent(SLC['2015-02':Period('2014-10'):-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[Period('2015-02'):'2014-10':-1], SLC[13:8:-1])
assert_slices_equivalent(SLC['2014-10':'2015-02':-1], SLC[:0])
def test_slice_with_zero_step_raises(self):
ts = Series(np.arange(20),
period_range('2014-01', periods=20, freq='M'))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: ts.ix[::0])
def test_sub(self):
rng = period_range('2007-01', periods=50)
result = rng - 5
exp = rng + (-5)
self.assertTrue(result.equals(exp))
def test_periods_number_check(self):
self.assertRaises(
ValueError, period_range, '2011-1-1', '2012-1-1', 'B')
def test_tolist(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
rs = index.tolist()
[tm.assert_isinstance(x, Period) for x in rs]
recon = PeriodIndex(rs)
self.assertTrue(index.equals(recon))
def test_to_timestamp(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = series.to_timestamp(how='end')
self.assertTrue(result.index.equals(exp_index))
self.assertEqual(result.name, 'foo')
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = series.to_timestamp(how='start')
self.assertTrue(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = series.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = series.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
result = series.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
self.assertRaises(ValueError, index.to_timestamp, '5t')
index = PeriodIndex(freq='H', start='1/1/2001', end='1/2/2001')
series = Series(1, index=index, name='foo')
exp_index = date_range('1/1/2001 00:59:59', end='1/2/2001 00:59:59',
freq='H')
result = series.to_timestamp(how='end')
self.assertTrue(result.index.equals(exp_index))
self.assertEqual(result.name, 'foo')
def test_to_timestamp_quarterly_bug(self):
years = np.arange(1960, 2000).repeat(4)
quarters = np.tile(lrange(1, 5), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
stamps = pindex.to_timestamp('D', 'end')
expected = DatetimeIndex([x.to_timestamp('D', 'end') for x in pindex])
self.assertTrue(stamps.equals(expected))
def test_to_timestamp_preserve_name(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009',
name='foo')
self.assertEqual(index.name, 'foo')
conv = index.to_timestamp('D')
self.assertEqual(conv.name, 'foo')
def test_to_timestamp_repr_is_code(self):
        zs = [Timestamp('99-04-17 00:00:00', tz='UTC'),
              Timestamp('2001-04-17 00:00:00', tz='UTC'),
              Timestamp('2001-04-17 00:00:00', tz='America/Los_Angeles'),
              Timestamp('2001-04-17 00:00:00', tz=None)]
        for z in zs:
            self.assertEqual(eval(repr(z)), z)
def test_to_timestamp_period_nat(self):
# GH 7228
index = PeriodIndex(['NaT', '2011-01', '2011-02'], freq='M', name='idx')
result = index.to_timestamp('D')
expected = DatetimeIndex([pd.NaT, datetime(2011, 1, 1),
datetime(2011, 2, 1)], name='idx')
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, 'idx')
result2 = result.to_period(freq='M')
self.assertTrue(result2.equals(index))
self.assertEqual(result2.name, 'idx')
def test_as_frame_columns(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
ts = df[rng[0]]
assert_series_equal(ts, df.ix[:, 0])
# GH # 1211
repr(df)
ts = df['1/1/2000']
assert_series_equal(ts, df.ix[:, 0])
def test_indexing(self):
# GH 4390, iat incorrectly indexing
index = period_range('1/1/2001', periods=10)
s = Series(randn(10), index=index)
expected = s[index[0]]
result = s.iat[0]
self.assertEqual(expected, result)
def test_frame_setitem(self):
rng = period_range('1/1/2000', periods=5)
rng.name = 'index'
df = DataFrame(randn(5, 3), index=rng)
df['Index'] = rng
rs = Index(df['Index'])
self.assertTrue(rs.equals(rng))
rs = df.reset_index().set_index('index')
tm.assert_isinstance(rs.index, PeriodIndex)
self.assertTrue(rs.index.equals(rng))
def test_period_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = period_range('2011/01/01', periods=6, freq='M')
idx2 = period_range('2013', periods=6, freq='A')
df = df.set_index(idx1)
self.assertTrue(df.index.equals(idx1))
df = df.set_index(idx2)
self.assertTrue(df.index.equals(idx2))
def test_nested_dict_frame_constructor(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def test_frame_to_time_stamp(self):
K = 5
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
df = DataFrame(randn(len(index), K), index=index)
df['mix'] = 'a'
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end')
self.assertTrue(result.index.equals(exp_index))
assert_almost_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start')
self.assertTrue(result.index.equals(exp_index))
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
result = df.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.index.equals(exp_index))
# columns
df = df.T
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end', axis=1)
self.assertTrue(result.columns.equals(exp_index))
assert_almost_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start', axis=1)
self.assertTrue(result.columns.equals(exp_index))
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end', axis=1)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end', axis=1)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
result = df.to_timestamp('S', 'end', axis=1)
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
self.assertTrue(result.columns.equals(exp_index))
# invalid axis
assertRaisesRegexp(ValueError, 'axis', df.to_timestamp, axis=2)
assertRaisesRegexp(ValueError, 'Only mult == 1', df.to_timestamp, '5t', axis=1)
def test_index_duplicate_periods(self):
# monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[1:3]
assert_series_equal(result, expected)
result[:] = 1
self.assertTrue((ts[1:3] == 1).all())
# not monotonic
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN')
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts[2007]
expected = ts[idx == 2007]
assert_series_equal(result, expected)
def test_index_unique(self):
idx = PeriodIndex([2000, 2007, 2007, 2009, 2009], freq='A-JUN')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN')
self.assert_numpy_array_equal(idx.unique(), expected.values)
self.assertEqual(idx.nunique(), 3)
idx = PeriodIndex([2000, 2007, 2007, 2009, 2007], freq='A-JUN', tz='US/Eastern')
expected = PeriodIndex([2000, 2007, 2009], freq='A-JUN', tz='US/Eastern')
self.assert_numpy_array_equal(idx.unique(), expected.values)
self.assertEqual(idx.nunique(), 3)
def test_constructor(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 9)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 4 * 9)
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 12 * 9)
pi = PeriodIndex(freq='D', start='1/1/2001', end='12/31/2009')
assert_equal(len(pi), 365 * 9 + 2)
pi = PeriodIndex(freq='B', start='1/1/2001', end='12/31/2009')
assert_equal(len(pi), 261 * 9)
pi = PeriodIndex(freq='H', start='1/1/2001', end='12/31/2001 23:00')
assert_equal(len(pi), 365 * 24)
pi = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 23:59')
assert_equal(len(pi), 24 * 60)
pi = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 23:59:59')
assert_equal(len(pi), 24 * 60 * 60)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
self.assertRaises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
self.assertRaises(ValueError, PeriodIndex, vals)
def test_shift(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2002', end='12/1/2010')
self.assertTrue(pi1.shift(0).equals(pi1))
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='A', start='1/1/2000', end='12/1/2008')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='2/1/2001', end='1/1/2010')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='M', start='12/1/2000', end='11/1/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='1/2/2001', end='12/2/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(1).values, pi2.values)
pi1 = PeriodIndex(freq='D', start='1/1/2001', end='12/1/2009')
pi2 = PeriodIndex(freq='D', start='12/31/2000', end='11/30/2009')
assert_equal(len(pi1), len(pi2))
assert_equal(pi1.shift(-1).values, pi2.values)
def test_shift_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx.shift(1)
expected = PeriodIndex(['2011-02', '2011-03', 'NaT', '2011-05'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
def test_asfreq(self):
pi1 = PeriodIndex(freq='A', start='1/1/2001', end='1/1/2001')
pi2 = PeriodIndex(freq='Q', start='1/1/2001', end='1/1/2001')
pi3 = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2001')
pi4 = PeriodIndex(freq='D', start='1/1/2001', end='1/1/2001')
pi5 = PeriodIndex(freq='H', start='1/1/2001', end='1/1/2001 00:00')
pi6 = PeriodIndex(freq='Min', start='1/1/2001', end='1/1/2001 00:00')
pi7 = PeriodIndex(freq='S', start='1/1/2001', end='1/1/2001 00:00:00')
self.assertEqual(pi1.asfreq('Q', 'S'), pi2)
self.assertEqual(pi1.asfreq('Q', 's'), pi2)
self.assertEqual(pi1.asfreq('M', 'start'), pi3)
self.assertEqual(pi1.asfreq('D', 'StarT'), pi4)
self.assertEqual(pi1.asfreq('H', 'beGIN'), pi5)
self.assertEqual(pi1.asfreq('Min', 'S'), pi6)
self.assertEqual(pi1.asfreq('S', 'S'), pi7)
self.assertEqual(pi2.asfreq('A', 'S'), pi1)
self.assertEqual(pi2.asfreq('M', 'S'), pi3)
self.assertEqual(pi2.asfreq('D', 'S'), pi4)
self.assertEqual(pi2.asfreq('H', 'S'), pi5)
self.assertEqual(pi2.asfreq('Min', 'S'), pi6)
self.assertEqual(pi2.asfreq('S', 'S'), pi7)
self.assertEqual(pi3.asfreq('A', 'S'), pi1)
self.assertEqual(pi3.asfreq('Q', 'S'), pi2)
self.assertEqual(pi3.asfreq('D', 'S'), pi4)
self.assertEqual(pi3.asfreq('H', 'S'), pi5)
self.assertEqual(pi3.asfreq('Min', 'S'), pi6)
self.assertEqual(pi3.asfreq('S', 'S'), pi7)
self.assertEqual(pi4.asfreq('A', 'S'), pi1)
self.assertEqual(pi4.asfreq('Q', 'S'), pi2)
self.assertEqual(pi4.asfreq('M', 'S'), pi3)
self.assertEqual(pi4.asfreq('H', 'S'), pi5)
self.assertEqual(pi4.asfreq('Min', 'S'), pi6)
self.assertEqual(pi4.asfreq('S', 'S'), pi7)
self.assertEqual(pi5.asfreq('A', 'S'), pi1)
self.assertEqual(pi5.asfreq('Q', 'S'), pi2)
self.assertEqual(pi5.asfreq('M', 'S'), pi3)
self.assertEqual(pi5.asfreq('D', 'S'), pi4)
self.assertEqual(pi5.asfreq('Min', 'S'), pi6)
self.assertEqual(pi5.asfreq('S', 'S'), pi7)
self.assertEqual(pi6.asfreq('A', 'S'), pi1)
self.assertEqual(pi6.asfreq('Q', 'S'), pi2)
self.assertEqual(pi6.asfreq('M', 'S'), pi3)
self.assertEqual(pi6.asfreq('D', 'S'), pi4)
self.assertEqual(pi6.asfreq('H', 'S'), pi5)
self.assertEqual(pi6.asfreq('S', 'S'), pi7)
self.assertEqual(pi7.asfreq('A', 'S'), pi1)
self.assertEqual(pi7.asfreq('Q', 'S'), pi2)
self.assertEqual(pi7.asfreq('M', 'S'), pi3)
self.assertEqual(pi7.asfreq('D', 'S'), pi4)
self.assertEqual(pi7.asfreq('H', 'S'), pi5)
self.assertEqual(pi7.asfreq('Min', 'S'), pi6)
self.assertRaises(ValueError, pi7.asfreq, 'T', 'foo')
self.assertRaises(ValueError, pi1.asfreq, '5t')
def test_asfreq_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M')
result = idx.asfreq(freq='Q')
expected = PeriodIndex(['2011Q1', '2011Q1', 'NaT', '2011Q2'], freq='Q')
self.assertTrue(result.equals(expected))
def test_period_index_length(self):
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 9)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 4 * 9)
pi = PeriodIndex(freq='M', start='1/1/2001', end='12/1/2009')
assert_equal(len(pi), 12 * 9)
start = Period('02-Apr-2005', 'B')
i1 = PeriodIndex(start=start, periods=20)
assert_equal(len(i1), 20)
assert_equal(i1.freq, start.freq)
assert_equal(i1[0], start)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), 10)
assert_equal(i1.freq, end_intv.freq)
assert_equal(i1[-1], end_intv)
end_intv = Period('2006-12-31', '1w')
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
end_intv = Period('2006-12-31', ('w', 1))
i2 = PeriodIndex(end=end_intv, periods=10)
assert_equal(len(i1), len(i2))
self.assertTrue((i1 == i2).all())
assert_equal(i1.freq, i2.freq)
try:
PeriodIndex(start=start, end=end_intv)
raise AssertionError('Cannot allow mixed freq for start and end')
except ValueError:
pass
end_intv = Period('2005-05-01', 'B')
i1 = PeriodIndex(start=start, end=end_intv)
try:
PeriodIndex(start=start)
raise AssertionError(
'Must specify periods if missing start or end')
except ValueError:
pass
# infer freq from first element
i2 = PeriodIndex([end_intv, Period('2005-05-05', 'B')])
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
i2 = PeriodIndex(np.array([end_intv, Period('2005-05-05', 'B')]))
assert_equal(len(i2), 2)
assert_equal(i2[0], end_intv)
# Mixed freq should fail
vals = [end_intv, Period('2006-12-31', 'w')]
self.assertRaises(ValueError, PeriodIndex, vals)
vals = np.array(vals)
self.assertRaises(ValueError, PeriodIndex, vals)
def test_frame_index_to_string(self):
index = PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
frame = DataFrame(np.random.randn(3, 4), index=index)
# it works!
frame.to_string()
def test_asfreq_ts(self):
index = PeriodIndex(freq='A', start='1/1/2001', end='12/31/2010')
ts = Series(np.random.randn(len(index)), index=index)
df = DataFrame(np.random.randn(len(index), 3), index=index)
result = ts.asfreq('D', how='end')
df_result = df.asfreq('D', how='end')
exp_index = index.asfreq('D', how='end')
self.assertEqual(len(result), len(ts))
self.assertTrue(result.index.equals(exp_index))
self.assertTrue(df_result.index.equals(exp_index))
result = ts.asfreq('D', how='start')
self.assertEqual(len(result), len(ts))
self.assertTrue(result.index.equals(index.asfreq('D', how='start')))
def test_badinput(self):
self.assertRaises(datetools.DateParseError, Period, '1/1/-2000', 'A')
# self.assertRaises(datetools.DateParseError, Period, '-2000', 'A')
# self.assertRaises(datetools.DateParseError, Period, '0', 'A')
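    # ordinals at or below zero are valid and refer to periods at or before the epoch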
def test_negative_ordinals(self):
p = Period(ordinal=-1000, freq='A')
p = Period(ordinal=0, freq='A')
idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A')
idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A')
        assert_array_equal(idx1, idx2)
def test_dti_to_period(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
pi1 = dti.to_period()
pi2 = dti.to_period(freq='D')
self.assertEqual(pi1[0], Period('Jan 2005', freq='M'))
self.assertEqual(pi2[0], Period('1/31/2005', freq='D'))
self.assertEqual(pi1[-1], Period('Nov 2005', freq='M'))
self.assertEqual(pi2[-1], Period('11/30/2005', freq='D'))
def test_pindex_slice_index(self):
pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='M')
s = Series(np.random.rand(len(pi)), index=pi)
res = s['2010']
exp = s[0:12]
assert_series_equal(res, exp)
res = s['2011']
exp = s[12:24]
assert_series_equal(res, exp)
def test_getitem_day(self):
# GH 6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with tm.assertRaises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
#with tm.assertRaises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01'], s[0:31])
assert_series_equal(s['2013/02'], s[31:59])
assert_series_equal(s['2014'], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with tm.assertRaises(KeyError):
s[v]
def test_range_slice_day(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01', freq='D', periods=400)
pidx = PeriodIndex(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
with tm.assertRaises(IndexError):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/02':], s[1:])
assert_series_equal(s['2013/01/02':'2013/01/05'], s[1:5])
assert_series_equal(s['2013/02':], s[31:])
assert_series_equal(s['2014':], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with tm.assertRaises(IndexError):
idx[v:]
def test_getitem_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
if _np_version_under1p9:
with tm.assertRaises(ValueError):
idx[v]
else:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
#with tm.assertRaises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/01 10:00'], s[3600:3660])
assert_series_equal(s['2013/01/01 9H'], s[:3600])
for d in ['2013/01/01', '2013/01', '2013']:
assert_series_equal(s[d], s)
def test_range_slice_seconds(self):
# GH 6716
didx = DatetimeIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
pidx = PeriodIndex(start='2013/01/01 09:00:00', freq='S', periods=4000)
for idx in [didx, pidx]:
# slices against index should raise IndexError
values = ['2014', '2013/02', '2013/01/02',
'2013/02/01 9H', '2013/02/01 09:00']
for v in values:
with tm.assertRaises(IndexError):
idx[v:]
s = Series(np.random.rand(len(idx)), index=idx)
assert_series_equal(s['2013/01/01 09:05':'2013/01/01 09:10'], s[300:660])
assert_series_equal(s['2013/01/01 10:00':'2013/01/01 10:05'], s[3600:3960])
assert_series_equal(s['2013/01/01 10H':], s[3600:])
assert_series_equal(s[:'2013/01/01 09:30'], s[:1860])
for d in ['2013/01/01', '2013/01', '2013']:
assert_series_equal(s[d:], s)
def test_range_slice_outofbounds(self):
# GH 5407
didx = DatetimeIndex(start='2013/10/01', freq='D', periods=10)
pidx = PeriodIndex(start='2013/10/01', freq='D', periods=10)
for idx in [didx, pidx]:
df = DataFrame(dict(units=[100 + i for i in range(10)]), index=idx)
empty = DataFrame(index=idx.__class__([], freq='D'), columns=['units'])
tm.assert_frame_equal(df['2013/09/01':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/09/30':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/01':'2013/10/02'], df.iloc[:2])
tm.assert_frame_equal(df['2013/10/02':'2013/09/30'], empty)
tm.assert_frame_equal(df['2013/10/15':'2013/10/17'], empty)
tm.assert_frame_equal(df['2013-06':'2013-09'], empty)
tm.assert_frame_equal(df['2013-11':'2013-12'], empty)
def test_pindex_fieldaccessor_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2012-03', '2012-04'], freq='D')
self.assert_numpy_array_equal(idx.year, np.array([2011, 2011, -1, 2012, 2012]))
self.assert_numpy_array_equal(idx.month, np.array([1, 2, -1, 3, 4]))
def test_pindex_qaccess(self):
pi = PeriodIndex(['2Q05', '3Q05', '4Q05', '1Q06', '2Q06'], freq='Q')
s = Series(np.random.rand(len(pi)), index=pi).cumsum()
        # TODO: fix these accessors!
self.assertEqual(s['05Q4'], s[2])
def test_period_dt64_round_trip(self):
dti = date_range('1/1/2000', '1/7/2002', freq='B')
pi = dti.to_period()
self.assertTrue(pi.to_timestamp().equals(dti))
dti = date_range('1/1/2000', '1/7/2002', freq='B')
pi = dti.to_period(freq='H')
self.assertTrue(pi.to_timestamp().equals(dti))
def test_to_period_quarterly(self):
# make sure we can make the round trip
for month in MONTHS:
freq = 'Q-%s' % month
rng = period_range('1989Q3', '1991Q3', freq=freq)
stamps = rng.to_timestamp()
result = stamps.to_period(freq)
self.assertTrue(rng.equals(result))
def test_to_period_quarterlyish(self):
offsets = ['BQ', 'QS', 'BQS']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'Q-DEC')
def test_to_period_annualish(self):
offsets = ['BA', 'AS', 'BAS']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'A-DEC')
def test_to_period_monthish(self):
offsets = ['MS', 'EOM', 'BM']
for off in offsets:
rng = date_range('01-Jan-2012', periods=8, freq=off)
prng = rng.to_period()
self.assertEqual(prng.freq, 'M')
def test_no_multiples(self):
self.assertRaises(ValueError, period_range, '1989Q3', periods=10,
freq='2Q')
self.assertRaises(ValueError, period_range, '1989', periods=10,
freq='2A')
self.assertRaises(ValueError, Period, '1989', freq='2A')
# def test_pindex_multiples(self):
# pi = PeriodIndex(start='1/1/10', end='12/31/12', freq='2M')
# self.assertEqual(pi[0], Period('1/1/10', '2M'))
# self.assertEqual(pi[1], Period('3/1/10', '2M'))
# self.assertEqual(pi[0].asfreq('6M'), pi[2].asfreq('6M'))
# self.assertEqual(pi[0].asfreq('A'), pi[2].asfreq('A'))
# self.assertEqual(pi[0].asfreq('M', how='S'),
# Period('Jan 2010', '1M'))
# self.assertEqual(pi[0].asfreq('M', how='E'),
# Period('Feb 2010', '1M'))
# self.assertEqual(pi[1].asfreq('M', how='S'),
# Period('Mar 2010', '1M'))
# i = Period('1/1/2010 12:05:18', '5S')
# self.assertEqual(i, Period('1/1/2010 12:05:15', '5S'))
# i = Period('1/1/2010 12:05:18', '5S')
# self.assertEqual(i.asfreq('1S', how='E'),
# Period('1/1/2010 12:05:19', '1S'))
def test_iteration(self):
index = PeriodIndex(start='1/1/10', periods=4, freq='B')
result = list(index)
tm.assert_isinstance(result[0], Period)
self.assertEqual(result[0].freq, index.freq)
def test_take(self):
index = PeriodIndex(start='1/1/10', end='12/31/12', freq='D', name='idx')
expected = PeriodIndex([datetime(2010, 1, 6), datetime(2010, 1, 7),
datetime(2010, 1, 9), datetime(2010, 1, 13)],
freq='D', name='idx')
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
self.assertTrue(taken.equals(expected))
tm.assert_isinstance(taken, PeriodIndex)
self.assertEqual(taken.freq, index.freq)
self.assertEqual(taken.name, expected.name)
def test_joins(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
joined = index.join(index[:-5], how=kind)
tm.assert_isinstance(joined, PeriodIndex)
self.assertEqual(joined.freq, index.freq)
def test_join_self(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
for kind in ['inner', 'outer', 'left', 'right']:
res = index.join(index, how=kind)
self.assertIs(index, res)
def test_join_does_not_recur(self):
df = tm.makeCustomDataframe(3, 2, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:2, 0]
res = s.index.join(df.columns, how='outer')
expected = Index([s.index[0], s.index[1],
df.columns[0], df.columns[1]], object)
tm.assert_index_equal(res, expected)
def test_align_series(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected[1::2] = np.nan
assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
assert_series_equal(result, expected)
# it works!
for kind in ['inner', 'outer', 'left', 'right']:
ts.align(ts[::2], join=kind)
with assertRaisesRegexp(ValueError, 'Only like-indexed'):
ts + ts.asfreq('D', how="end")
def test_align_frame(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_frame_equal(result, expected)
def test_union(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].union(index[10:])
self.assertTrue(result.equals(index))
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]))
self.assertTrue(result.equals(index))
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
self.assertRaises(ValueError, index.union, index2)
self.assertRaises(ValueError, index.join, index.to_timestamp())
def test_intersection(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].intersection(index[10:])
self.assertTrue(result.equals(index[10:-5]))
# not in order
left = _permute(index[:-5])
right = _permute(index[10:])
result = left.intersection(right).order()
self.assertTrue(result.equals(index[10:-5]))
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
self.assertRaises(ValueError, index.intersection, index2)
def test_fields(self):
# year, month, day, hour, minute
# second, weekofyear, week, dayofweek, weekday, dayofyear, quarter
# qyear
pi = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2005')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Q', start='1/1/2001', end='12/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='M', start='1/1/2001', end='1/1/2002')
self._check_all_fields(pi)
pi = PeriodIndex(freq='D', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='B', start='12/1/2001', end='6/1/2001')
self._check_all_fields(pi)
pi = PeriodIndex(freq='H', start='12/31/2001', end='1/1/2002 23:00')
self._check_all_fields(pi)
pi = PeriodIndex(freq='Min', start='12/31/2001', end='1/1/2002 00:20')
self._check_all_fields(pi)
pi = PeriodIndex(freq='S', start='12/31/2001 00:00:00',
end='12/31/2001 00:05:00')
self._check_all_fields(pi)
end_intv = Period('2006-12-31', 'W')
i1 = PeriodIndex(end=end_intv, periods=10)
self._check_all_fields(i1)
def _check_all_fields(self, periodindex):
fields = ['year', 'month', 'day', 'hour', 'minute',
'second', 'weekofyear', 'week', 'dayofweek',
'weekday', 'dayofyear', 'quarter', 'qyear', 'days_in_month']
periods = list(periodindex)
for field in fields:
field_idx = getattr(periodindex, field)
assert_equal(len(periodindex), len(field_idx))
for x, val in zip(periods, field_idx):
assert_equal(getattr(x, field), val)
def test_is_full(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
self.assertFalse(index.is_full)
index = PeriodIndex([2005, 2006, 2007], freq='A')
self.assertTrue(index.is_full)
index = PeriodIndex([2005, 2005, 2007], freq='A')
self.assertFalse(index.is_full)
index = PeriodIndex([2005, 2005, 2006], freq='A')
self.assertTrue(index.is_full)
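        # is_full is only defined for monotonic indexes; a decreasing index raises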
index = PeriodIndex([2006, 2005, 2005], freq='A')
self.assertRaises(ValueError, getattr, index, 'is_full')
self.assertTrue(index[:0].is_full)
def test_map(self):
index = PeriodIndex([2005, 2007, 2009], freq='A')
result = index.map(lambda x: x + 1)
expected = index + 1
self.assertTrue(result.equals(expected))
result = index.map(lambda x: x.ordinal)
exp = [x.ordinal for x in index]
assert_array_equal(result, exp)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq='A')
types = str,
if compat.PY3:
# unicode
types += compat.text_type,
for t in types:
expected = np.array(lmap(t, raw), dtype=object)
res = index.map(t)
# should return an array
tm.assert_isinstance(res, np.ndarray)
# preserve element types
self.assertTrue(all(isinstance(resi, t) for resi in res))
# dtype should be object
self.assertEqual(res.dtype, np.dtype('object').type)
# lastly, values should compare equal
assert_array_equal(res, expected)
def test_convert_array_of_periods(self):
rng = period_range('1/1/2000', periods=20, freq='D')
periods = list(rng)
result = pd.Index(periods)
tm.assert_isinstance(result, PeriodIndex)
def test_with_multi_index(self):
# #1705
index = date_range('1/1/2012', periods=4, freq='12H')
index_as_arrays = [index.to_period(freq='D'), index.hour]
s = Series([0, 1, 2, 3], index_as_arrays)
tm.assert_isinstance(s.index.levels[0], PeriodIndex)
tm.assert_isinstance(s.index.values[0][0], Period)
def test_to_datetime_1703(self):
index = period_range('1/1/2012', periods=4, freq='D')
result = index.to_datetime()
self.assertEqual(result[0], Timestamp('1/1/2012'))
def test_get_loc_msg(self):
idx = period_range('2000-1-1', freq='A', periods=10)
bad_period = Period('2012', 'A')
self.assertRaises(KeyError, idx.get_loc, bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
self.assertEqual(inst.args[0], bad_period)
def test_append_concat(self):
# #1815
d1 = date_range('12/31/1990', '12/31/1999', freq='A-DEC')
d2 = date_range('12/31/2000', '12/31/2009', freq='A-DEC')
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = pd.concat([s1, s2])
tm.assert_isinstance(result.index, PeriodIndex)
self.assertEqual(result.index[0], s1.index[0])
def test_pickle_freq(self):
# GH2891
import pickle
prng = period_range('1/1/2011', '1/1/2012', freq='M')
new_prng = self.round_trip_pickle(prng)
        self.assertEqual(new_prng.freq, 'M')
def test_slice_keep_name(self):
idx = period_range('20010101', periods=10, freq='D', name='bob')
self.assertEqual(idx.name, idx[1:].name)
def test_factorize(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
exp_arr = np.array([2, 2, 1, 0, 2, 0])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = PeriodIndex(['2014-03', '2014-02', '2014-01'], freq='M')
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
def test_recreate_from_data(self):
for o in ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'N', 'H']:
org = PeriodIndex(start='2001/04/01', freq=o, periods=1)
idx = PeriodIndex(org.values, freq=o)
self.assertTrue(idx.equals(org))
def test_combine_first(self):
# GH 3367
didx = pd.DatetimeIndex(start='1950-01-31', end='1950-07-31', freq='M')
pidx = pd.PeriodIndex(start=pd.Period('1950-1'), end=pd.Period('1950-7'), freq='M')
# check to be consistent with DatetimeIndex
for idx in [didx, pidx]:
a = pd.Series([1, np.nan, np.nan, 4, 5, np.nan, 7], index=idx)
b = pd.Series([9, 9, 9, 9, 9, 9, 9], index=idx)
result = a.combine_first(b)
expected = pd.Series([1, 9, 9, 4, 5, 9, 7], index=idx, dtype=np.float64)
tm.assert_series_equal(result, expected)
def test_searchsorted(self):
pidx = pd.period_range('2014-01-01', periods=10, freq='D')
self.assertEqual(
pidx.searchsorted(pd.Period('2014-01-01', freq='D')), 0)
self.assertRaisesRegexp(
ValueError, 'Different period frequency: H',
lambda: pidx.searchsorted(pd.Period('2014-01-01', freq='H')))
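# Helper: return ``obj`` with its elements randomly shuffled; used by the
# union/intersection/alignment tests to check that results are order-insensitive.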
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestMethods(tm.TestCase):
    """Tests for addition and subtraction on Period and PeriodIndex objects."""
def test_add(self):
dt1 = Period(freq='D', year=2008, month=1, day=1)
dt2 = Period(freq='D', year=2008, month=1, day=2)
assert_equal(dt1 + 1, dt2)
#
# GH 4731
        msg = r"unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + "str"
with tm.assertRaisesRegexp(TypeError, msg):
dt1 + dt2
def test_add_offset(self):
# freq is DateOffset
p = Period('2011', freq='A')
self.assertEqual(p + offsets.YearEnd(2), Period('2013', freq='A'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
p = Period('2011-03', freq='M')
self.assertEqual(p + offsets.MonthEnd(2), Period('2011-05', freq='M'))
self.assertEqual(p + offsets.MonthEnd(12), Period('2012-03', freq='M'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
# freq is Tick
p = Period('2011-04-01', freq='D')
self.assertEqual(p + offsets.Day(5), Period('2011-04-06', freq='D'))
self.assertEqual(p + offsets.Hour(24), Period('2011-04-02', freq='D'))
self.assertEqual(p + np.timedelta64(2, 'D'), Period('2011-04-03', freq='D'))
self.assertEqual(p + np.timedelta64(3600 * 24, 's'), Period('2011-04-02', freq='D'))
self.assertEqual(p + timedelta(-2), Period('2011-03-30', freq='D'))
self.assertEqual(p + timedelta(hours=48), Period('2011-04-03', freq='D'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
p = Period('2011-04-01 09:00', freq='H')
self.assertEqual(p + offsets.Day(2), Period('2011-04-03 09:00', freq='H'))
self.assertEqual(p + offsets.Hour(3), Period('2011-04-01 12:00', freq='H'))
self.assertEqual(p + np.timedelta64(3, 'h'), Period('2011-04-01 12:00', freq='H'))
self.assertEqual(p + np.timedelta64(3600, 's'), Period('2011-04-01 10:00', freq='H'))
self.assertEqual(p + timedelta(minutes=120), Period('2011-04-01 11:00', freq='H'))
self.assertEqual(p + timedelta(days=4, minutes=180), Period('2011-04-05 12:00', freq='H'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
def test_add_offset_nat(self):
# freq is DateOffset
p = Period('NaT', freq='A')
for o in [offsets.YearEnd(2)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p + o
p = Period('NaT', freq='M')
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
# freq is Tick
p = Period('NaT', freq='D')
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
p = Period('NaT', freq='H')
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
self.assertEqual((p + o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
p + o
def test_sub_offset(self):
# freq is DateOffset
p = Period('2011', freq='A')
self.assertEqual(p - offsets.YearEnd(2), Period('2009', freq='A'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
p = Period('2011-03', freq='M')
self.assertEqual(p - offsets.MonthEnd(2), Period('2011-01', freq='M'))
self.assertEqual(p - offsets.MonthEnd(12), Period('2010-03', freq='M'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
# freq is Tick
p = Period('2011-04-01', freq='D')
self.assertEqual(p - offsets.Day(5), Period('2011-03-27', freq='D'))
self.assertEqual(p - offsets.Hour(24), Period('2011-03-31', freq='D'))
self.assertEqual(p - np.timedelta64(2, 'D'), Period('2011-03-30', freq='D'))
self.assertEqual(p - np.timedelta64(3600 * 24, 's'), Period('2011-03-31', freq='D'))
self.assertEqual(p - timedelta(-2), Period('2011-04-03', freq='D'))
self.assertEqual(p - timedelta(hours=48), Period('2011-03-30', freq='D'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p - o
p = Period('2011-04-01 09:00', freq='H')
self.assertEqual(p - offsets.Day(2), Period('2011-03-30 09:00', freq='H'))
self.assertEqual(p - offsets.Hour(3), Period('2011-04-01 06:00', freq='H'))
self.assertEqual(p - np.timedelta64(3, 'h'), Period('2011-04-01 06:00', freq='H'))
self.assertEqual(p - np.timedelta64(3600, 's'), Period('2011-04-01 08:00', freq='H'))
self.assertEqual(p - timedelta(minutes=120), Period('2011-04-01 07:00', freq='H'))
self.assertEqual(p - timedelta(days=4, minutes=180), Period('2011-03-28 06:00', freq='H'))
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p - o
def test_sub_offset_nat(self):
# freq is DateOffset
p = Period('NaT', freq='A')
for o in [offsets.YearEnd(2)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
p = Period('NaT', freq='M')
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
with tm.assertRaises(ValueError):
p - o
# freq is Tick
p = Period('NaT', freq='D')
for o in [offsets.Day(5), offsets.Hour(24), np.timedelta64(2, 'D'),
np.timedelta64(3600 * 24, 's'), timedelta(-2), timedelta(hours=48)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
with tm.assertRaises(ValueError):
p - o
p = Period('NaT', freq='H')
for o in [offsets.Day(2), offsets.Hour(3), np.timedelta64(3, 'h'),
np.timedelta64(3600, 's'), timedelta(minutes=120),
timedelta(days=4, minutes=180)]:
self.assertEqual((p - o).ordinal, tslib.iNaT)
for o in [offsets.YearBegin(2), offsets.MonthBegin(1), offsets.Minute(),
np.timedelta64(3200, 's'), timedelta(hours=23, minutes=30)]:
with tm.assertRaises(ValueError):
p - o
def test_nat_ops(self):
p = Period('NaT', freq='M')
self.assertEqual((p + 1).ordinal, tslib.iNaT)
self.assertEqual((p - 1).ordinal, tslib.iNaT)
self.assertEqual((p - Period('2011-01', freq='M')).ordinal, tslib.iNaT)
self.assertEqual((Period('2011-01', freq='M') - p).ordinal, tslib.iNaT)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-04'], freq='M', name='idx')
result = idx + 2
expected = PeriodIndex(['2011-03', '2011-04', 'NaT', '2011-06'], freq='M', name='idx')
self.assertTrue(result.equals(expected))
result2 = result - 2
self.assertTrue(result2.equals(idx))
        msg = r"unsupported operand type\(s\)"
with tm.assertRaisesRegexp(TypeError, msg):
idx + "str"
class TestPeriodRepresentation(tm.TestCase):
    """
    Period ordinals are anchored at the epoch: for each supported frequency the
    period containing 1970-01-01 has ordinal 0, mirroring NumPy's datetime64
    unit origins.
    """
def test_annual(self):
self._check_freq('A', 1970)
def test_monthly(self):
self._check_freq('M', '1970-01')
def test_weekly(self):
self._check_freq('W-THU', '1970-01-01')
def test_daily(self):
self._check_freq('D', '1970-01-01')
def test_business_daily(self):
self._check_freq('B', '1970-01-01')
def test_hourly(self):
self._check_freq('H', '1970-01-01')
def test_minutely(self):
self._check_freq('T', '1970-01-01')
def test_secondly(self):
self._check_freq('S', '1970-01-01')
def test_millisecondly(self):
self._check_freq('L', '1970-01-01')
def test_microsecondly(self):
self._check_freq('U', '1970-01-01')
def test_nanosecondly(self):
self._check_freq('N', '1970-01-01')
def _check_freq(self, freq, base_date):
rng = PeriodIndex(start=base_date, periods=10, freq=freq)
exp = np.arange(10, dtype=np.int64)
self.assert_numpy_array_equal(rng.values, exp)
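    # ordinal=-1 denotes the period immediately before the epoch, so
    # calendar-based frequencies should report year 1969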
def test_negone_ordinals(self):
freqs = ['A', 'M', 'Q', 'D', 'H', 'T', 'S']
period = Period(ordinal=-1, freq='D')
for freq in freqs:
repr(period.asfreq(freq))
for freq in freqs:
period = Period(ordinal=-1, freq=freq)
repr(period)
self.assertEqual(period.year, 1969)
period = Period(ordinal=-1, freq='B')
repr(period)
period = Period(ordinal=-1, freq='W')
repr(period)
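# Period comparisons require matching frequencies: mismatched frequencies raise
# ValueError, and ordering against non-Period objects raises TypeError.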
class TestComparisons(tm.TestCase):
def setUp(self):
self.january1 = Period('2000-01', 'M')
self.january2 = Period('2000-01', 'M')
self.february = Period('2000-02', 'M')
self.march = Period('2000-03', 'M')
self.day = Period('2012-01-01', 'D')
def test_equal(self):
self.assertEqual(self.january1, self.january2)
def test_equal_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 == self.day
def test_notEqual(self):
self.assertNotEqual(self.january1, 1)
self.assertNotEqual(self.january1, self.february)
def test_greater(self):
self.assertTrue(self.february > self.january1)
def test_greater_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 > self.day
def test_greater_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 > 1
def test_greaterEqual(self):
self.assertTrue(self.january1 >= self.january2)
def test_greaterEqual_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 >= self.day
with tm.assertRaises(TypeError):
print(self.january1 >= 1)
def test_smallerEqual(self):
self.assertTrue(self.january1 <= self.january2)
def test_smallerEqual_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 <= self.day
def test_smallerEqual_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 <= 1
def test_smaller(self):
self.assertTrue(self.january1 < self.february)
def test_smaller_Raises_Value(self):
with tm.assertRaises(ValueError):
self.january1 < self.day
def test_smaller_Raises_Type(self):
with tm.assertRaises(TypeError):
self.january1 < 1
def test_sort(self):
periods = [self.march, self.january1, self.february]
correctPeriods = [self.january1, self.february, self.march]
self.assertEqual(sorted(periods), correctPeriods)
def test_period_nat_comp(self):
p_nat = Period('NaT', freq='D')
p = Period('2011-01-01', freq='D')
nat = pd.Timestamp('NaT')
t = pd.Timestamp('2011-01-01')
# confirm Period('NaT') work identical with Timestamp('NaT')
for left, right in [(p_nat, p), (p, p_nat), (p_nat, p_nat),
(nat, t), (t, nat), (nat, nat)]:
self.assertEqual(left < right, False)
self.assertEqual(left > right, False)
self.assertEqual(left == right, False)
self.assertEqual(left != right, True)
self.assertEqual(left <= right, False)
self.assertEqual(left >= right, False)
def test_pi_nat_comp(self):
idx1 = PeriodIndex(['2011-01', '2011-02', 'NaT', '2011-05'], freq='M')
result = idx1 > Period('2011-02', freq='M')
self.assert_numpy_array_equal(result, np.array([False, False, False, True]))
result = idx1 == Period('NaT', freq='M')
self.assert_numpy_array_equal(result, np.array([False, False, False, False]))
result = idx1 != Period('NaT', freq='M')
self.assert_numpy_array_equal(result, np.array([True, True, True, True]))
idx2 = PeriodIndex(['2011-02', '2011-01', '2011-04', 'NaT'], freq='M')
result = idx1 < idx2
self.assert_numpy_array_equal(result, np.array([True, False, False, False]))
result = idx1 == idx1
self.assert_numpy_array_equal(result, np.array([True, True, False, True]))
result = idx1 != idx1
self.assert_numpy_array_equal(result, np.array([False, False, True, False]))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-2.0 |
mattgiguere/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
KevinTarhan/simulate_random_points | point_simulation.py | 1 | 6628 | import numpy as np
from qtpy import QtWidgets
import flika
flika_version = flika.__version__
from flika import global_vars as g
from flika.process.BaseProcess import BaseProcess_noPriorWindow, SliderLabel, CheckBox
from flika.process.file_ import save_file_gui, open_file_gui
from qtpy.QtWidgets import *
from qtpy.QtCore import *
import os.path
import matplotlib.pyplot as plt
from matplotlib import path
from .neighbor_dist import distances
from flika.logger import logger
from matplotlib.widgets import LassoSelector
def save_text_file(text, filename=None):
"""save_text_file(text, filename=None)
Save a string to a text file
Parameters:
filename (str): Text will be saved here
text (str): Text to be saved
Returns:
Tuple with directory filename is stored in and filename's location
"""
if filename is None or filename is False:
filetypes = '*.txt'
prompt = 'Save File As Txt'
filename = save_file_gui(prompt, filetypes=filetypes)
if filename is None:
return None
g.m.statusBar().showMessage('Saving Points in {}'.format(os.path.basename(filename)))
file = open(filename, 'w')
file.write(text)
file.close()
g.m.statusBar().showMessage('Successfully saved {}'.format(os.path.basename(filename)))
directory = os.path.dirname(filename)
return directory, filename
def get_text_file(filename=None):
if filename is None:
filetypes = '.txt'
prompt = 'Open File'
filename = open_file_gui(prompt, filetypes=filetypes)
if filename is None:
return None
else:
filename = g.settings['filename']
if filename is None:
g.alert('No filename selected')
return None
print("Filename: {}".format(filename))
g.m.statusBar().showMessage('Loading {}'.format(os.path.basename(filename)))
return filename
def bounded_point_sim(width, height, numpoints, boundaries, pixel_scale, display_graphs):
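    # Rejection sampling: draw uniform candidates inside the width x height box
    # and keep only those falling within the ROI polygon read from `boundaries`.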
random_x = []
random_y = []
export_txt = ''
np_bounds = np.loadtxt(boundaries, skiprows=1)
comparison_path = path.Path(np_bounds)
count = 0
while count < numpoints:
potential_x = np.random.uniform(0, width, 1)
potential_y = np.random.uniform(0, height, 1)
point_array = np.array([potential_x, potential_y]).reshape(1, 2)
if comparison_path.contains_points(point_array):
export_txt += str(potential_x[0] * pixel_scale) + ' ' + str(potential_y[0] * pixel_scale) + '\n'
random_x.append(potential_x)
random_y.append(potential_y)
count += 1
random_x = np.array(random_x)
random_y = np.array(random_y)
ret = save_plot_points(random_x, random_y, display_graphs)
if ret == QMessageBox.Save:
r = save_text_file(export_txt)
compute_neighbor_distance(r[0], r[1], pixel_scale, display_graphs)
else:
return
def unbounded_point_sim(width, height, numpoints, pixel_scale, display_graphs):
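    # Uniform sampling over the whole window; exported coordinates are scaled
    # by pixel_scale (microns per pixel) before being written out.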
export_txt = ''
random_x = np.random.uniform(0, width, numpoints)
random_y = np.random.uniform(0, height, numpoints)
for i, k in zip(random_x, random_y):
export_txt += str(i * pixel_scale) + ' ' + str(k * pixel_scale) + '\n'
ret = save_plot_points(random_x, random_y,display_graphs)
if ret == QMessageBox.Save:
r = save_text_file(export_txt)
compute_neighbor_distance(r[0], r[1], pixel_scale, display_graphs)
else:
return
def save_plot_points(x, y, display_graphs, area=5):
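    # Optionally display the scatter plot, then ask whether to save the
    # coordinates; returns the QMessageBox result (Save or Cancel).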
if display_graphs:
plt.scatter(x, y, s=area)
plt.show()
save_box = QMessageBox()
save_box.setWindowTitle('Save?')
save_box.setText('Save (x,y) coordinates of scatter plot? This is necessary to compute nearest neighbors.')
save_box.setStandardButtons(QMessageBox.Save | QMessageBox.Cancel)
save_box.setDefaultButton(QMessageBox.Save)
ret = save_box.exec_()
return ret
def compute_neighbor_distance(base_directory, file_directory, pixel_scale, display_graphs):
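    # Prompt for an output filename, then delegate the nearest-neighbour
    # computation to distances() from the neighbor_dist module.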
save_box = QWidget()
full_dir = r'{file}'.format(file=file_directory)
file_output, ok = QInputDialog.getText(save_box, "Neighbor Distance", """Output filename?""")
if not ok:
return
output_dir = r'{dir}/{file}.txt'.format(dir=base_directory, file=file_output)
logger.debug(full_dir)
distances(full_dir, full_dir, output_dir, pixel_scale, display_graphs)
class PointSim(BaseProcess_noPriorWindow):
def __init__(self):
super().__init__()
self.__name__ = self.__class__.__name__
def get_init_settings_dict(self):
s = dict()
s['window_width'] = 200
s['window_height'] = 200
s['num_points'] = 10000
s['pixel_scale'] = .532
return s
def gui(self):
self.gui_reset()
window_width = SliderLabel()
window_width.setRange(1, 2000)
window_height = SliderLabel()
window_height.setRange(1, 2000)
num_points = SliderLabel()
num_points.setRange(1, 13000)
pixel_scale = QtWidgets.QDoubleSpinBox()
pixel_scale.setDecimals(3)
pixel_scale.setSingleStep(.001)
load_ROI = CheckBox()
display_graphs = CheckBox()
self.items.append({'name': 'window_width', 'string': 'Window Width', 'object': window_width})
self.items.append({'name': 'window_height', 'string': 'Window Height', 'object': window_height})
self.items.append({'name': 'num_points', 'string': 'Number of Points', 'object': num_points})
self.items.append({'name': 'pixel_scale', 'string': 'Microns per Pixel', 'object': pixel_scale})
self.items.append({'name': 'load_ROI', 'string': 'Load ROI?', 'object': load_ROI})
self.items.append({'name': 'display_graphs', 'string': 'Display Graphs?', 'object': display_graphs})
super().gui()
self.ui.setGeometry(QRect(400, 50, 600, 130))
    def __call__(self, window_width=200, window_height=200, num_points=10000, pixel_scale=.532, load_ROI=False, display_graphs=True):
try:
if pixel_scale == 0:
pixel_scale = 1
self.start()
if load_ROI:
boundaries = get_text_file()
bounded_point_sim(window_width, window_height, num_points, boundaries, pixel_scale,display_graphs)
else:
unbounded_point_sim(window_width, window_height, num_points, pixel_scale, display_graphs)
except TypeError:
return
PointSim = PointSim()
| mit |
s20121035/rk3288_android5.1_repo | cts/apps/CameraITS/tests/scene0/test_gyro_bias.py | 3 | 2534 | # Copyright 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import its.image
import its.caps
import its.device
import its.objects
import its.target
import time
import pylab
import os.path
import matplotlib
import matplotlib.pyplot
import numpy
def main():
"""Test if the gyro has stable output when device is stationary.
"""
NAME = os.path.basename(__file__).split(".")[0]
# Number of samples averaged together, in the plot.
N = 20
# Pass/fail thresholds for gyro drift
MEAN_THRESH = 0.01
VAR_THRESH = 0.001
with its.device.ItsSession() as cam:
props = cam.get_camera_properties()
# Only run test if the appropriate caps are claimed.
its.caps.skip_unless(its.caps.sensor_fusion(props))
print "Collecting gyro events"
cam.start_sensor_events()
time.sleep(5)
gyro_events = cam.get_sensor_events()["gyro"]
nevents = (len(gyro_events) / N) * N
gyro_events = gyro_events[:nevents]
times = numpy.array([(e["time"] - gyro_events[0]["time"])/1000000000.0
for e in gyro_events])
xs = numpy.array([e["x"] for e in gyro_events])
ys = numpy.array([e["y"] for e in gyro_events])
zs = numpy.array([e["z"] for e in gyro_events])
# Group samples into size-N groups and average each together, to get rid
# of individual random spikes in the data.
times = times[N/2::N]
xs = xs.reshape(nevents/N, N).mean(1)
ys = ys.reshape(nevents/N, N).mean(1)
zs = zs.reshape(nevents/N, N).mean(1)
pylab.plot(times, xs, 'r', label="x")
pylab.plot(times, ys, 'g', label="y")
pylab.plot(times, zs, 'b', label="z")
pylab.xlabel("Time (seconds)")
pylab.ylabel("Gyro readings (mean of %d samples)"%(N))
pylab.legend()
matplotlib.pyplot.savefig("%s_plot.png" % (NAME))
for samples in [xs,ys,zs]:
assert(samples.mean() < MEAN_THRESH)
assert(numpy.var(samples) < VAR_THRESH)
if __name__ == '__main__':
main()
| gpl-3.0 |
rs2/pandas | pandas/io/parquet.py | 1 | 12428 | """ parquet compat """
from typing import Any, AnyStr, Dict, List, Optional
from warnings import catch_warnings
from pandas._typing import FilePathOrBuffer, StorageOptions
from pandas.compat._optional import import_optional_dependency
from pandas.errors import AbstractMethodError
from pandas import DataFrame, get_option
from pandas.io.common import get_filepath_or_buffer, is_fsspec_url, stringify_path
def get_engine(engine: str) -> "BaseImpl":
""" return our implementation """
if engine == "auto":
engine = get_option("io.parquet.engine")
if engine == "auto":
# try engines in this order
engine_classes = [PyArrowImpl, FastParquetImpl]
error_msgs = ""
for engine_class in engine_classes:
try:
return engine_class()
except ImportError as err:
error_msgs += "\n - " + str(err)
raise ImportError(
"Unable to find a usable engine; "
"tried using: 'pyarrow', 'fastparquet'.\n"
"A suitable version of "
"pyarrow or fastparquet is required for parquet "
"support.\n"
"Trying to import the above resulted in these errors:"
f"{error_msgs}"
)
if engine == "pyarrow":
return PyArrowImpl()
elif engine == "fastparquet":
return FastParquetImpl()
raise ValueError("engine must be one of 'pyarrow', 'fastparquet'")
class BaseImpl:
@staticmethod
def validate_dataframe(df: DataFrame):
if not isinstance(df, DataFrame):
raise ValueError("to_parquet only supports IO with DataFrames")
# must have value column names (strings only)
if df.columns.inferred_type not in {"string", "empty"}:
raise ValueError("parquet must have string column names")
# index level names must be strings
valid_names = all(
isinstance(name, str) for name in df.index.names if name is not None
)
if not valid_names:
raise ValueError("Index level names must be strings")
def write(self, df: DataFrame, path, compression, **kwargs):
raise AbstractMethodError(self)
def read(self, path, columns=None, **kwargs):
raise AbstractMethodError(self)
class PyArrowImpl(BaseImpl):
def __init__(self):
import_optional_dependency(
"pyarrow", extra="pyarrow is required for parquet support."
)
import pyarrow.parquet
# import utils to register the pyarrow extension types
import pandas.core.arrays._arrow_utils # noqa
self.api = pyarrow
def write(
self,
df: DataFrame,
path: FilePathOrBuffer[AnyStr],
compression: Optional[str] = "snappy",
index: Optional[bool] = None,
storage_options: StorageOptions = None,
partition_cols: Optional[List[str]] = None,
**kwargs,
):
self.validate_dataframe(df)
from_pandas_kwargs: Dict[str, Any] = {"schema": kwargs.pop("schema", None)}
if index is not None:
from_pandas_kwargs["preserve_index"] = index
table = self.api.Table.from_pandas(df, **from_pandas_kwargs)
if is_fsspec_url(path) and "filesystem" not in kwargs:
# make fsspec instance, which pyarrow will use to open paths
import_optional_dependency("fsspec")
import fsspec.core
fs, path = fsspec.core.url_to_fs(path, **(storage_options or {}))
kwargs["filesystem"] = fs
else:
if storage_options:
raise ValueError(
"storage_options passed with file object or non-fsspec file path"
)
path = stringify_path(path)
if partition_cols is not None:
# writes to multiple files under the given path
self.api.parquet.write_to_dataset(
table,
path,
compression=compression,
partition_cols=partition_cols,
**kwargs,
)
else:
# write to single output file
self.api.parquet.write_table(table, path, compression=compression, **kwargs)
def read(
self, path, columns=None, storage_options: StorageOptions = None, **kwargs
):
if is_fsspec_url(path) and "filesystem" not in kwargs:
import_optional_dependency("fsspec")
import fsspec.core
fs, path = fsspec.core.url_to_fs(path, **(storage_options or {}))
should_close = False
else:
if storage_options:
raise ValueError(
"storage_options passed with buffer or non-fsspec filepath"
)
fs = kwargs.pop("filesystem", None)
should_close = False
path = stringify_path(path)
if not fs:
ioargs = get_filepath_or_buffer(path)
path = ioargs.filepath_or_buffer
should_close = ioargs.should_close
kwargs["use_pandas_metadata"] = True
result = self.api.parquet.read_table(
path, columns=columns, filesystem=fs, **kwargs
).to_pandas()
if should_close:
path.close()
return result
class FastParquetImpl(BaseImpl):
def __init__(self):
# since pandas is a dependency of fastparquet
# we need to import on first use
fastparquet = import_optional_dependency(
"fastparquet", extra="fastparquet is required for parquet support."
)
self.api = fastparquet
def write(
self,
df: DataFrame,
path,
compression="snappy",
index=None,
partition_cols=None,
storage_options: StorageOptions = None,
**kwargs,
):
self.validate_dataframe(df)
# thriftpy/protocol/compact.py:339:
# DeprecationWarning: tostring() is deprecated.
# Use tobytes() instead.
if "partition_on" in kwargs and partition_cols is not None:
raise ValueError(
"Cannot use both partition_on and "
"partition_cols. Use partition_cols for partitioning data"
)
elif "partition_on" in kwargs:
partition_cols = kwargs.pop("partition_on")
if partition_cols is not None:
kwargs["file_scheme"] = "hive"
if is_fsspec_url(path):
fsspec = import_optional_dependency("fsspec")
# if filesystem is provided by fsspec, file must be opened in 'wb' mode.
kwargs["open_with"] = lambda path, _: fsspec.open(
path, "wb", **(storage_options or {})
).open()
else:
if storage_options:
raise ValueError(
"storage_options passed with file object or non-fsspec file path"
)
path = get_filepath_or_buffer(path).filepath_or_buffer
with catch_warnings(record=True):
self.api.write(
path,
df,
compression=compression,
write_index=index,
partition_on=partition_cols,
**kwargs,
)
def read(
self, path, columns=None, storage_options: StorageOptions = None, **kwargs
):
if is_fsspec_url(path):
fsspec = import_optional_dependency("fsspec")
open_with = lambda path, _: fsspec.open(
path, "rb", **(storage_options or {})
).open()
parquet_file = self.api.ParquetFile(path, open_with=open_with)
else:
path = get_filepath_or_buffer(path).filepath_or_buffer
parquet_file = self.api.ParquetFile(path)
return parquet_file.to_pandas(columns=columns, **kwargs)
def to_parquet(
df: DataFrame,
path: FilePathOrBuffer[AnyStr],
engine: str = "auto",
compression: Optional[str] = "snappy",
index: Optional[bool] = None,
storage_options: StorageOptions = None,
partition_cols: Optional[List[str]] = None,
**kwargs,
):
"""
Write a DataFrame to the parquet format.
Parameters
----------
df : DataFrame
path : str or file-like object
If a string, it will be used as Root Directory path
when writing a partitioned dataset. By file-like object,
we refer to objects with a write() method, such as a file handler
(e.g. via builtin open function) or io.BytesIO. The engine
fastparquet does not accept file-like objects.
.. versionchanged:: 0.24.0
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output. If
``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
.. versionadded:: 0.24.0
partition_cols : str or list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
Must be None if path is not a string.
.. versionadded:: 0.24.0
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
docs for the set of allowed keys and values
.. versionadded:: 1.2.0
kwargs
Additional keyword arguments passed to the engine
"""
if isinstance(partition_cols, str):
partition_cols = [partition_cols]
impl = get_engine(engine)
return impl.write(
df,
path,
compression=compression,
index=index,
partition_cols=partition_cols,
storage_options=storage_options,
**kwargs,
)
def read_parquet(path, engine: str = "auto", columns=None, **kwargs):
"""
Load a parquet object from the file path, returning a DataFrame.
Parameters
----------
path : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.parquet``.
A file URL can also be a path to a directory that contains multiple
partitioned parquet files. Both pyarrow and fastparquet support
paths to directories as well as file URLs. A directory path could be:
``file://localhost/path/to/tables`` or ``s3://bucket/partition_dir``
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handler (e.g. via builtin ``open`` function)
or ``StringIO``.
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
columns : list, default=None
If not None, only these columns will be read from the file.
**kwargs
Any additional kwargs are passed to the engine.
Returns
-------
DataFrame
"""
impl = get_engine(engine)
return impl.read(path, columns=columns, **kwargs)
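# Illustrative round trip (hypothetical file name, shown as comments only):
#
#   import pandas as pd
#   df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
#   df.to_parquet("example.parquet", engine="pyarrow", compression="snappy")
#   subset = pd.read_parquet("example.parquet", columns=["a"])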
| bsd-3-clause |
jmfranck/pyspecdata | examples/text_only/basic_units.py | 3 | 11135 | """
===========
Basic Units
===========
"""
import math
import numpy as np
import matplotlib.units as units
import matplotlib.ticker as ticker
class ProxyDelegate:
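    # Descriptor that builds a proxy bound to the accessed instance, so each
    # lookup of a proxied method returns a fresh proxy of the configured type.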
def __init__(self, fn_name, proxy_type):
self.proxy_type = proxy_type
self.fn_name = fn_name
def __get__(self, obj, objtype=None):
return self.proxy_type(self.fn_name, obj)
class TaggedValueMeta(type):
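    # Installs a ProxyDelegate for every operator listed in ``_proxies`` so
    # that TaggedValue forwards those operators to its wrapped value.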
def __init__(self, name, bases, dict):
for fn_name in self._proxies:
if not hasattr(self, fn_name):
setattr(self, fn_name,
ProxyDelegate(fn_name, self._proxies[fn_name]))
class PassThroughProxy:
def __init__(self, fn_name, obj):
self.fn_name = fn_name
self.target = obj.proxy_target
def __call__(self, *args):
fn = getattr(self.target, self.fn_name)
ret = fn(*args)
return ret
class ConvertArgsProxy(PassThroughProxy):
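    # Converts each positional argument to this value's unit before delegating.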
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
converted_args = []
for a in args:
try:
converted_args.append(a.convert_to(self.unit))
except AttributeError:
converted_args.append(TaggedValue(a, self.unit))
converted_args = tuple([c.get_value() for c in converted_args])
return PassThroughProxy.__call__(self, *converted_args)
class ConvertReturnProxy(PassThroughProxy):
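    # Re-wraps the delegated call's return value as a TaggedValue in this unit.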
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
ret = PassThroughProxy.__call__(self, *args)
return (NotImplemented if ret is NotImplemented
else TaggedValue(ret, self.unit))
class ConvertAllProxy(PassThroughProxy):
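    # Converts arguments where possible, delegates the call, then asks
    # unit_resolver which unit the result should carry.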
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
converted_args = []
arg_units = [self.unit]
for a in args:
if hasattr(a, 'get_unit') and not hasattr(a, 'convert_to'):
# if this arg has a unit type but no conversion ability,
# this operation is prohibited
return NotImplemented
if hasattr(a, 'convert_to'):
try:
a = a.convert_to(self.unit)
except Exception:
pass
arg_units.append(a.get_unit())
converted_args.append(a.get_value())
else:
converted_args.append(a)
if hasattr(a, 'get_unit'):
arg_units.append(a.get_unit())
else:
arg_units.append(None)
converted_args = tuple(converted_args)
ret = PassThroughProxy.__call__(self, *converted_args)
if ret is NotImplemented:
return NotImplemented
ret_unit = unit_resolver(self.fn_name, arg_units)
if ret_unit is NotImplemented:
return NotImplemented
return TaggedValue(ret, ret_unit)
class TaggedValue(metaclass=TaggedValueMeta):
_proxies = {'__add__': ConvertAllProxy,
'__sub__': ConvertAllProxy,
'__mul__': ConvertAllProxy,
'__rmul__': ConvertAllProxy,
'__cmp__': ConvertAllProxy,
'__lt__': ConvertAllProxy,
'__gt__': ConvertAllProxy,
'__len__': PassThroughProxy}
def __new__(cls, value, unit):
# generate a new subclass for value
value_class = type(value)
try:
subcls = type(f'TaggedValue_of_{value_class.__name__}',
(cls, value_class), {})
return object.__new__(subcls)
except TypeError:
return object.__new__(cls)
def __init__(self, value, unit):
self.value = value
self.unit = unit
self.proxy_target = self.value
def __getattribute__(self, name):
if name.startswith('__'):
return object.__getattribute__(self, name)
variable = object.__getattribute__(self, 'value')
if hasattr(variable, name) and name not in self.__class__.__dict__:
return getattr(variable, name)
return object.__getattribute__(self, name)
def __array__(self, dtype=object):
return np.asarray(self.value).astype(dtype)
def __array_wrap__(self, array, context):
return TaggedValue(array, self.unit)
def __repr__(self):
return 'TaggedValue({!r}, {!r})'.format(self.value, self.unit)
def __str__(self):
return str(self.value) + ' in ' + str(self.unit)
def __len__(self):
return len(self.value)
def __iter__(self):
# Return a generator expression rather than use `yield`, so that
# TypeError is raised by iter(self) if appropriate when checking for
# iterability.
return (TaggedValue(inner, self.unit) for inner in self.value)
def get_compressed_copy(self, mask):
new_value = np.ma.masked_array(self.value, mask=mask).compressed()
return TaggedValue(new_value, self.unit)
def convert_to(self, unit):
if unit == self.unit or not unit:
return self
try:
new_value = self.unit.convert_value_to(self.value, unit)
except AttributeError:
new_value = self
return TaggedValue(new_value, unit)
def get_value(self):
return self.value
def get_unit(self):
return self.unit
class BasicUnit:
def __init__(self, name, fullname=None):
self.name = name
if fullname is None:
fullname = name
self.fullname = fullname
self.conversions = dict()
def __repr__(self):
return f'BasicUnit({self.name})'
def __str__(self):
return self.fullname
def __call__(self, value):
return TaggedValue(value, self)
def __mul__(self, rhs):
value = rhs
unit = self
if hasattr(rhs, 'get_unit'):
value = rhs.get_value()
unit = rhs.get_unit()
unit = unit_resolver('__mul__', (self, unit))
if unit is NotImplemented:
return NotImplemented
return TaggedValue(value, unit)
def __rmul__(self, lhs):
return self*lhs
def __array_wrap__(self, array, context):
return TaggedValue(array, self)
def __array__(self, t=None, context=None):
ret = np.array([1])
if t is not None:
return ret.astype(t)
else:
return ret
def add_conversion_factor(self, unit, factor):
def convert(x):
return x*factor
self.conversions[unit] = convert
def add_conversion_fn(self, unit, fn):
self.conversions[unit] = fn
def get_conversion_fn(self, unit):
return self.conversions[unit]
def convert_value_to(self, value, unit):
conversion_fn = self.conversions[unit]
ret = conversion_fn(value)
return ret
def get_unit(self):
return self
class UnitResolver:
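    # Resolves the unit of a binary operation: addition/subtraction require
    # identical units, multiplication allows at most one unit-tagged operand.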
def addition_rule(self, units):
for unit_1, unit_2 in zip(units[:-1], units[1:]):
if unit_1 != unit_2:
return NotImplemented
return units[0]
def multiplication_rule(self, units):
non_null = [u for u in units if u]
if len(non_null) > 1:
return NotImplemented
return non_null[0]
op_dict = {
'__mul__': multiplication_rule,
'__rmul__': multiplication_rule,
'__add__': addition_rule,
'__radd__': addition_rule,
'__sub__': addition_rule,
'__rsub__': addition_rule}
def __call__(self, operation, units):
if operation not in self.op_dict:
return NotImplemented
return self.op_dict[operation](self, units)
unit_resolver = UnitResolver()
cm = BasicUnit('cm', 'centimeters')
inch = BasicUnit('inch', 'inches')
inch.add_conversion_factor(cm, 2.54)
cm.add_conversion_factor(inch, 1/2.54)
radians = BasicUnit('rad', 'radians')
degrees = BasicUnit('deg', 'degrees')
radians.add_conversion_factor(degrees, 180.0/np.pi)
degrees.add_conversion_factor(radians, np.pi/180.0)
secs = BasicUnit('s', 'seconds')
hertz = BasicUnit('Hz', 'Hertz')
minutes = BasicUnit('min', 'minutes')
secs.add_conversion_fn(hertz, lambda x: 1./x)
secs.add_conversion_factor(minutes, 1/60.0)
# radians formatting
def rad_fn(x, pos=None):
if x >= 0:
n = int((x / np.pi) * 2.0 + 0.25)
else:
n = int((x / np.pi) * 2.0 - 0.25)
if n == 0:
return '0'
elif n == 1:
return r'$\pi/2$'
elif n == 2:
return r'$\pi$'
elif n == -1:
return r'$-\pi/2$'
elif n == -2:
return r'$-\pi$'
elif n % 2 == 0:
return fr'${n//2}\pi$'
else:
return fr'${n}\pi/2$'
class BasicUnitConverter(units.ConversionInterface):
@staticmethod
def axisinfo(unit, axis):
"""Return AxisInfo instance for x and unit."""
if unit == radians:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.fullname,
)
elif unit == degrees:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter(r'$%i^\circ$'),
label=unit.fullname,
)
elif unit is not None:
if hasattr(unit, 'fullname'):
return units.AxisInfo(label=unit.fullname)
elif hasattr(unit, 'unit'):
return units.AxisInfo(label=unit.unit.fullname)
return None
@staticmethod
def convert(val, unit, axis):
if units.ConversionInterface.is_numlike(val):
return val
if np.iterable(val):
if isinstance(val, np.ma.MaskedArray):
val = val.astype(float).filled(np.nan)
out = np.empty(len(val))
for i, thisval in enumerate(val):
if np.ma.is_masked(thisval):
out[i] = np.nan
else:
try:
out[i] = thisval.convert_to(unit).get_value()
except AttributeError:
out[i] = thisval
return out
if np.ma.is_masked(val):
return np.nan
else:
return val.convert_to(unit).get_value()
@staticmethod
def default_units(x, axis):
"""Return the default unit for x or None."""
if np.iterable(x):
for thisx in x:
return thisx.unit
return x.unit
def cos(x):
if np.iterable(x):
return [math.cos(val.convert_to(radians).get_value()) for val in x]
else:
return math.cos(x.convert_to(radians).get_value())
units.registry[BasicUnit] = units.registry[TaggedValue] = BasicUnitConverter()
| bsd-3-clause |
mrocklin/blaze | blaze/tests/test_pytables.py | 1 | 5256 | import numpy as np
import os
import datashape as ds
import pytest
from toolz import first
from blaze import into
from blaze.utils import tmpfile
from blaze.compatibility import xfail
from blaze import PyTables, discover
import pandas as pd
tb = pytest.importorskip('tables')
try:
f = pd.HDFStore('foo')
except (RuntimeError, ImportError) as e:
    pytest.skip('skipping test_pytables.py %s' % e)
else:
f.close()
os.remove('foo')
now = np.datetime64('now').astype('datetime64[us]')
@pytest.fixture
def x():
y = np.array([(1, 'Alice', 100),
(2, 'Bob', -200),
(3, 'Charlie', 300),
(4, 'Denis', 400),
(5, 'Edith', -500)],
dtype=[('id', '<i8'), ('name', 'S7'), ('amount', '<i8')])
return y
@pytest.yield_fixture
def tbfile(x):
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'title', x)
d.close()
f.close()
yield filename
@pytest.fixture
def raw_dt_data():
raw_dt_data = [[1, 'Alice', 100, now],
[2, 'Bob', -200, now],
[3, 'Charlie', 300, now],
[4, 'Denis', 400, now],
[5, 'Edith', -500, now]]
for i, d in enumerate(raw_dt_data):
d[-1] += np.timedelta64(i, 'D')
return list(map(tuple, raw_dt_data))
@pytest.fixture
def dt_data(raw_dt_data):
return np.array(raw_dt_data, dtype=np.dtype([('id', 'i8'),
('name', 'S7'),
('amount', 'f8'),
('date', 'M8[ms]')]))
@pytest.yield_fixture
def dt_tb(dt_data):
class Desc(tb.IsDescription):
id = tb.Int64Col(pos=0)
name = tb.StringCol(itemsize=7, pos=1)
amount = tb.Float64Col(pos=2)
date = tb.Time64Col(pos=3)
non_date_types = list(zip(['id', 'name', 'amount'], ['i8', 'S7', 'f8']))
# has to be in microseconds as per pytables spec
dtype = np.dtype(non_date_types + [('date', 'M8[us]')])
rec = dt_data.astype(dtype)
# also has to be a floating point number
dtype = np.dtype(non_date_types + [('date', 'f8')])
rec = rec.astype(dtype)
rec['date'] /= 1e6
with tmpfile('.h5') as filename:
f = tb.open_file(filename, mode='w')
d = f.create_table('/', 'dt', description=Desc)
d.append(rec)
d.close()
f.close()
yield filename
class TestPyTablesLight(object):
def test_read(self, tbfile):
t = PyTables(path=tbfile, datapath='/title')
shape = t.shape
t._v_file.close()
assert shape == (5,)
def test_write_no_dshape(self, tbfile):
with pytest.raises(ValueError):
PyTables(path=tbfile, datapath='/write_this')
@xfail(raises=NotImplementedError,
reason='PyTables does not support object columns')
def test_write_with_bad_dshape(self, tbfile):
dshape = '{id: int, name: string, amount: float32}'
PyTables(path=tbfile, datapath='/write_this', dshape=dshape)
def test_write_with_dshape(self, tbfile):
f = tb.open_file(tbfile, mode='a')
try:
assert '/write_this' not in f
finally:
f.close()
del f
# create our table
dshape = '{id: int, name: string[7, "ascii"], amount: float32}'
t = PyTables(path=tbfile, datapath='/write_this', dshape=dshape)
shape = t.shape
filename = t._v_file.filename
t._v_file.close()
assert filename == tbfile
assert shape == (0,)
@xfail(reason="Don't yet support datetimes")
def test_table_into_ndarray(self, dt_tb, dt_data):
t = PyTables(dt_tb, '/dt')
res = into(np.ndarray, t)
try:
for k in res.dtype.fields:
lhs, rhs = res[k], dt_data[k]
if (issubclass(np.datetime64, lhs.dtype.type) and
issubclass(np.datetime64, rhs.dtype.type)):
lhs, rhs = lhs.astype('M8[us]'), rhs.astype('M8[us]')
assert np.array_equal(lhs, rhs)
finally:
t._v_file.close()
def test_ndarray_into_table(self, dt_tb, dt_data):
dtype = ds.from_numpy(dt_data.shape, dt_data.dtype)
t = PyTables(dt_tb, '/out', dtype)
try:
res = into(np.ndarray, into(t, dt_data, filename=dt_tb, datapath='/out'))
for k in res.dtype.fields:
lhs, rhs = res[k], dt_data[k]
if (issubclass(np.datetime64, lhs.dtype.type) and
issubclass(np.datetime64, rhs.dtype.type)):
lhs, rhs = lhs.astype('M8[us]'), rhs.astype('M8[us]')
assert np.array_equal(lhs, rhs)
finally:
t._v_file.close()
def test_datetime_discovery(self, dt_tb, dt_data):
t = PyTables(dt_tb, '/dt')
lhs, rhs = map(discover, (t, dt_data))
t._v_file.close()
assert lhs == rhs
def test_no_extra_files_around(self, dt_tb):
""" check the context manager auto-closes the resources """
assert not len(tb.file._open_files)
| bsd-3-clause |