repo_name (stringlengths 6–112) | path (stringlengths 4–204) | copies (stringlengths 1–3) | size (stringlengths 4–6) | content (stringlengths 714–810k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
mjudsp/Tsallis | sklearn/decomposition/tests/test_truncated_svd.py | 73 | 6086 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
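# A hedged sketch of the scipy.sparse.rand alternative mentioned in the XXX
# comment above (requires a newer SciPy); X_alt is illustrative only -- its
# uniform (0, 1) values would still need rescaling before a tf-idf-like
# transform -- and it is not used by the tests below.
X_alt = sp.rand(n_samples, n_features, density=0.2, format="csr",
                random_state=42)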
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
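# Why the np.abs() above: singular vectors are only determined up to a sign
# flip, so the two solvers may legitimately return components that differ by
# a factor of -1. A minimal sketch on a toy matrix (names are illustrative):
def _sign_ambiguity_demo():
    M = np.arange(12, dtype=np.float64).reshape(4, 3)
    U, s, Vt = np.linalg.svd(M, full_matrices=False)
    flip = np.array([1.0, -1.0, 1.0])
    # Flipping the sign of a matched left/right singular-vector pair leaves
    # the reconstruction unchanged, which is why only absolute values of the
    # components are compared above.
    M_flipped = (U * flip).dot(np.diag(s)).dot(Vt * flip[:, np.newaxis])
    assert_array_almost_equal(M, M_flipped)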
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
        Xfmt = Xdense if fmt == "array" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
        tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
cybercomgroup/Big_Data | Cloudera/Code/SVM/titanic_data_prediction.py | 1 | 1881 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
train_set = pd.read_csv('../Titanic_Dataset/train.csv')
test_set = pd.read_csv('../Titanic_Dataset/test.csv')
###Data preparation
train_set["Embarked"].replace("S", "0", True)
train_set["Embarked"].replace("C", "1", True)
train_set["Embarked"].replace("Q", "2", True)
train_set["Sex"].replace("male", "0", True)
train_set["Sex"].replace("female", "1", True)
del train_set["Name"]
del train_set["Cabin"]
del train_set["Ticket"]
del train_set["PassengerId"]
train_set.fillna(0, inplace=True)
titanic_results = train_set["Survived"]
del train_set["Survived"]
###End data preparation
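# A hedged alternative to the manual replace() calls above: pandas' map() does
# the categorical encoding in one pass. The tiny frame below is illustrative
# only and is not used by the rest of the script.
encoding_demo = pd.DataFrame({"Sex": ["male", "female"], "Embarked": ["S", "Q"]})
encoding_demo["Sex"] = encoding_demo["Sex"].map({"male": 0, "female": 1})
encoding_demo["Embarked"] = encoding_demo["Embarked"].map({"S": 0, "C": 1, "Q": 2})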
##Selftest
machine=svm.SVC()
machine.fit(train_set.values, titanic_results.values)
predicted_survival=machine.predict(train_set.values)
predictionSuccess=(1-np.mean(predicted_survival != titanic_results.values))*100
print("Test against training set(self test): "+str(predictionSuccess)+"% correctness")
###End selftest
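# The self-test above scores the model on its own training data, which gives
# an optimistic accuracy estimate. A hedged sketch of a cross-validated score
# instead (assumes a scikit-learn version that provides model_selection):
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(svm.SVC(), train_set.values, titanic_results.values, cv=5)
print("5-fold cross-validation accuracy: "+str(cv_scores.mean()*100)+"%")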
###Predict survival of test.csv
test_set["Embarked"].replace("S", "0", True)
test_set["Embarked"].replace("C", "1", True)
test_set["Embarked"].replace("Q", "2", True)
test_set["Sex"].replace("male", "0", True)
test_set["Sex"].replace("female", "1", True)
del test_set["Name"]
del test_set["Cabin"]
del test_set["Ticket"]
del test_set["PassengerId"]
test_set.fillna(0, inplace=True)
#print(test_set.values)
test_prediction=machine.predict(test_set)
#print("Test aganst test set: "+str(test_prediction))
untouchedTest = pd.read_csv('../Titanic_Dataset/test.csv')
untouchedTest=untouchedTest["PassengerId"] # a Series already named "PassengerId"
predictionDF=pd.DataFrame(test_prediction, np.arange(len(test_prediction)),columns=["Survived"])
joinedDF=pd.concat([untouchedTest,predictionDF], axis=1)
joinedDF.to_csv("predicted.csv",index=False)
###End Predict
| gpl-3.0 |
adrian-ionescu/apache-spark | python/setup.py | 11 | 9765 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark otherwise we
# want to use the symlink farm. And if the symlink farm already exists while under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
exit(-1)
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
except OSError:
print("Could not convert - pandoc is not installed", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='[email protected]',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.6'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': ['pandas>=0.13.0']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Depending on cleaning up the symlink farm or copied version
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
| apache-2.0 |
rsivapr/scikit-learn | sklearn/metrics/pairwise.py | 2 | 37180 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.metrics.pairwise` submodule implements utilities to evaluate
pairwise distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary is
given on the two here.
Distance metrics are functions d(a, b) such that d(a, b) < d(a, c) if objects
a and b are considered "more similar" than objects a and c. Two objects exactly
alike would have a distance of zero.
One of the most popular examples is Euclidean distance.
To be a 'true' metric, it must obey the following four conditions::
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
Kernels are measures of similarity, i.e. ``s(a, b) > s(a, c)``
if objects ``a`` and ``b`` are considered "more similar" than objects
``a`` and ``c``. A kernel must also be positive semi-definite.
There are a number of ways to convert between a distance metric and a
similarity measure, such as a kernel. Let D be the distance, and S be the
kernel:
1. ``S = np.exp(-D * gamma)``, where one heuristic for choosing
``gamma`` is ``1 / num_features``
2. ``S = 1. / (D / np.max(D))``
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import atleast2d_or_csr
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils import safe_asarray
from ..utils.extmath import safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast
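# A minimal sketch of the two distance-to-kernel conversions described in the
# module docstring above (the toy distance matrix and gamma value here are
# illustrative only, not part of the public API):
def _distance_to_kernel_demo():
    D = np.array([[0., 2.], [2., 0.]])
    gamma = 1. / D.shape[1]            # the "1 / num_features" heuristic
    S_exp = np.exp(-D * gamma)         # conversion 1 from the docstring
    with np.errstate(divide='ignore'):
        S_ratio = 1. / (D / np.max(D))  # conversion 2; infinite where D == 0
    return S_exp, S_ratio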
# Utility Functions
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
Returns
-------
safe_X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
if Y is X or Y is None:
X = Y = atleast2d_or_csr(X)
else:
X = atleast2d_or_csr(X)
Y = atleast2d_or_csr(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
if not (X.dtype == Y.dtype == np.float32):
if Y is X:
X = Y = safe_asarray(X, dtype=np.float)
else:
X = safe_asarray(X, dtype=np.float)
Y = safe_asarray(Y, dtype=np.float)
return X, Y
# Distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
    vectors x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two main advantages. First, it is computationally
efficient when dealing with sparse data. Second, if x varies but y
remains unchanged, then the right-most dot-product `dot(y, y)` can be
pre-computed.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_1, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_2, n_features]
Y_norm_squared : array-like, shape = [n_samples_2], optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape = [n_samples_1, n_samples_2]
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if issparse(X):
XX = X.multiply(X).sum(axis=1)
else:
XX = np.sum(X * X, axis=1)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is None:
if issparse(Y):
# scipy.sparse matrices don't have element-wise scalar
# exponentiation, and tocsr has a copy kwarg only on CSR matrices.
YY = Y.copy() if isinstance(Y, csr_matrix) else Y.tocsr()
YY.data **= 2
YY = np.asarray(YY.sum(axis=1)).T
else:
YY = np.sum(Y ** 2, axis=1)[np.newaxis, :]
else:
YY = atleast2d_or_csr(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances)
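# A hedged sanity check of the expansion used above (toy vectors, illustrative
# only): sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y)) matches the direct
# definition sqrt(sum((x - y) ** 2)).
def _euclidean_expansion_demo():
    rng = np.random.RandomState(0)
    x, y = rng.rand(3), rng.rand(3)
    direct = np.sqrt(((x - y) ** 2).sum())
    expanded = np.sqrt(np.dot(x, x) - 2 * np.dot(x, y) + np.dot(y, y))
    assert np.allclose(direct, expanded)
    return direct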
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs={}):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
==========
X, Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
Returns
=======
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
========
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype='int32')
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
dist_chunk = np.dot(X_chunk, Y_chunk.T)
dist_chunk *= -2
dist_chunk += (X_chunk * X_chunk
).sum(axis=1)[:, np.newaxis]
dist_chunk += (Y_chunk * Y_chunk
).sum(axis=1)[np.newaxis, :]
np.maximum(dist_chunk, 0, dist_chunk)
else:
dist_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
dist_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = dist_chunk.argmin(axis=1)
min_values = dist_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x] = np.where(
flags, min_indices + chunk_y.start, indices[chunk_x])
values[chunk_x] = np.where(
flags, min_values, values[chunk_x])
if metric == "euclidean" and not metric_kwargs.get("squared", False):
values = np.sqrt(values)
return indices, values
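# A minimal numpy-only sketch of what pairwise_distances_argmin_min computes,
# without the chunking (toy arrays, illustrative only): build the full
# distance matrix, then reduce with argmin/min along axis 1.
def _argmin_min_naive_demo():
    rng = np.random.RandomState(0)
    A, B = rng.rand(5, 3), rng.rand(7, 3)
    full = np.sqrt(((A[:, np.newaxis, :] - B[np.newaxis, :, :]) ** 2).sum(axis=2))
    return full.argmin(axis=1), full.min(axis=1)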
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs={}):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
==========
X, Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
Returns
=======
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
========
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
size_threshold : int, default=5e8
Avoid creating temporary matrices bigger than size_threshold (in
bytes). If the problem size gets too big, the implementation then
breaks it down in smaller problems.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise l1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
if issparse(X) or issparse(Y):
raise ValueError("manhattan_distance does not support sparse"
" matrices.")
X, Y = check_pairwise_arrays(X, Y)
temporary_size = X.size * Y.shape[-1]
# Convert to bytes
temporary_size *= X.itemsize
if temporary_size > size_threshold and sum_over_features:
# Broadcasting the full thing would be too big: it's on the order
# of magnitude of the gigabyte
D = np.empty((X.shape[0], Y.shape[0]), dtype=X.dtype)
index = 0
increment = 1 + int(size_threshold / float(temporary_size) *
X.shape[0])
while index < X.shape[0]:
this_slice = slice(index, index + increment)
tmp = X[this_slice, np.newaxis, :] - Y[np.newaxis, :, :]
tmp = np.abs(tmp, tmp)
tmp = np.sum(tmp, axis=2)
D[this_slice] = tmp
index += increment
else:
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
if sum_over_features:
D = np.sum(D, axis=2)
else:
D = D.reshape((-1, X.shape[1]))
return D
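# A hedged sketch of the broadcasting used above for the componentwise
# (sum_over_features=False) case, on the toy data from the docstring example:
def _manhattan_componentwise_demo():
    A = np.ones((1, 2))
    B = 2 * np.ones((2, 2))
    diff = np.abs(A[:, np.newaxis, :] - B[np.newaxis, :, :])  # shape (1, 2, 2)
    return diff.reshape((-1, A.shape[1]))  # shape (2, 2), every entry equal to 1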
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
degree : int
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
degree : int
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = linear_kernel(X_normalized, Y_normalized)
return K
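# A hedged numpy-only restatement of the formula in the docstring above (toy
# vectors, illustrative only): the cosine kernel is the dot product of the
# L2-normalized inputs.
def _cosine_similarity_demo():
    a = np.array([1., 2., 3.])
    b = np.array([0., 1., 1.])
    manual = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    normalized = np.dot(a / np.linalg.norm(a), b / np.linalg.norm(b))
    assert np.allclose(manual, normalized)
    return manual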
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
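# A hedged pure-numpy reference for the two chi-squared kernels above (toy
# histograms, illustrative only); the Cython helper _chi2_kernel_fast is what
# the library actually uses, and entries with x + y == 0 contribute 0.
def _chi2_reference_demo(gamma=1.):
    x = np.array([1., 2., 0.])
    y = np.array([2., 1., 0.])
    denom = x + y
    safe = np.where(denom > 0, denom, 1.)
    additive = -np.where(denom > 0, (x - y) ** 2 / safe, 0.).sum()
    return additive, np.exp(gamma * additive)  # additive_chi2_kernel, chi2_kernel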
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
ret = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(func)(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Please note that support for sparse matrices is currently limited to
'euclidean', 'l2' and 'cosine'.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate distance for each element in X and Y.
# FIXME: can use n_jobs here too
# FIXME: np.zeros can be replaced by np.empty
D = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# distance assumed to be symmetric.
D[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
D[j][i] = D[i][j]
return D
else:
# Note: the distance module doesn't support sparse matrices!
if type(X) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
if Y is None:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
else:
if type(Y) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
return distance.cdist(X, Y, metric=metric, **kwds)
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate kernel for each element in X and Y.
K = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# Kernel assumed to be symmetric.
K[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
K[j][i] = K[i][j]
return K
else:
raise ValueError("Unknown kernel %r" % metric)
| bsd-3-clause |
MohammedWasim/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
wazeerzulfikar/scikit-learn | examples/feature_selection/plot_feature_selection_pipeline.py | 58 | 1049 | """
==================
Pipeline Anova SVM
==================
Simple usage of Pipeline that runs successively a univariate
feature selection with anova and then a C-SVM of the selected features.
"""
from sklearn import svm
from sklearn.datasets import samples_generator
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
print(__doc__)
# import some data to play with
X, y = samples_generator.make_classification(
n_features=20, n_informative=3, n_redundant=0, n_classes=4,
n_clusters_per_class=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# ANOVA SVM-C
# 1) anova filter, take 3 best ranked features
anova_filter = SelectKBest(f_regression, k=3)
# 2) svm
clf = svm.SVC(kernel='linear')
anova_svm = make_pipeline(anova_filter, clf)
anova_svm.fit(X_train, y_train)
y_pred = anova_svm.predict(X_test)
print(classification_report(y_test, y_pred))
| bsd-3-clause |
aminert/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
akita8/risparmi | core.py | 1 | 15428 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 25 15:33:17 2016
@author: marco
"""
from requests.exceptions import ConnectionError
import datetime as dt
import numpy as np
from pandas import DataFrame, Series, set_option
from pandas_datareader import data
from shutil import copytree
from os import path, stat
class Risparmi:
def __init__(self, directory='fabio'):
set_option('expand_frame_repr', False)
set_option('precision', 4)
        # create a new folder with the name passed to init
if not path.exists(directory):
            # and copy the contents of assets(template) into it
copytree('assets(template)', directory)
self.date = dt.datetime.today().strftime("%d/%m/%y")
self.directory = directory
self.assets = {'stock': DataFrame(), 'bond': DataFrame(), 'currency': DataFrame(),
'cash': DataFrame()}
        self.assets_inizialization()  # initialize the assets from the csv files
self.stock_val_account = {}
self.stock_val_sector = {}
self.updated_assets_values_gen()
self.stock_report_gen()
self.bond_report_gen()
def assets_inizialization(self):
        '''Populate the assets dictionary with the values stored in the assets folder.'''
for el in self.assets:
path_to_data = self.directory + '/' + el + '.csv'
self.assets[el] = self.assets[el].from_csv(path_to_data)
def get_asset_values_list(self, asset_type, asset_value):
return list(set(self.assets[asset_type][asset_value]))
def get_asset_preloded(self, symbol):
asset_types = ['stock', 'bond', 'currency']
for asset_type in asset_types:
preloaded = self.assets[asset_type][
self.assets[asset_type].simbolo == symbol]
if not preloaded.empty:
return preloaded.head(1).to_dict(orient='list')
def get_complete_symbol_list(self):
asset_types = ['stock', 'bond', 'currency']
complete_list = []
for asset_type in asset_types:
complete_list.extend(list(set(self.assets[asset_type]['simbolo'])))
return complete_list
def get_inizialized_values(self, asset_type, external=True, asset=None):
with open(self.directory + '/' + asset_type + '.csv', 'r') as f:
header = f.readline().strip('\n')
header_list = header[1:].split(',')
if external:
return {header: '' for header in header_list}
else:
return dict(zip(header_list, asset))
def save_to_csv(self):
        '''Save each dataframe in the dictionary to its corresponding CSV file.'''
        for el in self.assets:
            file_path = self.directory + '/' + el + '.csv'
            with open(file_path, 'w') as f:
                f.write(self.assets[el].to_csv())
def updated_asset_values_save(self):
with open('stock_value.csv', 'w') as f:
f.write(self.updated_stock_values.to_csv(header=True))
with open('currency_value.csv', 'w') as f:
f.write(self.updated_currency_values.to_csv(header=True))
def add_asset_transaction(self, transaction_values_dict, asset_type):
self.assets[asset_type].loc[
len(self.assets[asset_type].index)] = transaction_values_dict
def updated_assets_values_gen(self, forced=False):
        '''Build dataframes of the current stock and currency values from Yahoo Finance.'''
last_download = dt.datetime.fromtimestamp(
stat('stock_value.csv').st_mtime).strftime("%d/%m/%y")
if last_download != self.date or forced:
try:
# stocks
self.updated_stock_values = DataFrame()
for symbol in list(set(self.assets['stock']['simbolo'])):
temp = data.get_quote_yahoo(symbol)
self.updated_stock_values = self.updated_stock_values.append(
temp)
self.updated_stock_values.index.name = 'simbolo'
self.updated_stock_values = self.updated_stock_values.drop(
['PE', 'short_ratio', 'time', 'change_pct'], axis=1)
# currency
self.updated_currency_values = DataFrame()
for symbol in list(set(self.assets['currency']['simbolo'])):
temp = data.get_quote_yahoo(symbol)
self.updated_currency_values = self.updated_currency_values.append(
temp)
self.updated_currency_values.index.name = 'simbolo'
self.updated_currency_values = self.updated_currency_values.drop(
['PE', 'short_ratio', 'time', 'change_pct'], axis=1)
self.updated_asset_values_save()
except ConnectionError:
self.updated_stock_values = DataFrame().from_csv('stock_value.csv')
self.updated_currency_values = DataFrame().from_csv('currency_value.csv')
else:
self.updated_stock_values = DataFrame().from_csv('stock_value.csv')
self.updated_currency_values = DataFrame().from_csv('currency_value.csv')
def stock_report_gen(self):
def stock_agg_func(*args):
            '''TODO: handle the case with conversion to euro driven by the flag'''
            flag = False  # temporary
asset = args[0]
index = ['conto', 'simbolo', 'settore', 'valuta', 'quantità', 'acquisto_tot',
'vendita_tot', 'cedola-dividendo_netto_tot']
report = [asset[9], asset[0], asset[3], asset[4]]
if asset[7] == 'acquisto':
tobin_tax = asset[8]
gross_price = asset[11] * asset[10]
commission = asset[13]
if np.isnan(tobin_tax) == False:
tobin_tax *= gross_price
else:
tobin_tax = 0
if flag and np.isnan(asset[15]) == False:
gross_price = (asset[11] * asset[15]) * asset[10]
commission *= asset[15]
temp = tobin_tax + gross_price + commission
report.append(asset[10])
report.append(temp)
report.append(np.nan)
report.append(np.nan)
elif asset[7] == 'vendita':
gross_price = asset[11] * asset[10]
tax = gross_price * asset[14]
commission = asset[13]
if flag and np.isnan(asset[15]) == False:
gross_price = (asset[11] * asset[15]) * asset[10]
tax = gross_price * asset[14]
commission *= asset[15]
temp = gross_price - commission - tax
report.append((-1) * asset[10])
report.append(np.nan)
report.append(temp)
report.append(np.nan)
elif asset[7] == 'cedola-dividendo':
gross_dividend = asset[12]
tax = gross_dividend * asset[14]
                # this form is the one flake8 considers correct
if flag and not np.isnan(asset[15]):
gross_dividend *= asset[15]
tax = gross_dividend * asset[14]
temp = gross_dividend - tax
report.append(np.nan)
report.append(np.nan)
report.append(np.nan)
report.append(temp)
return Series(report, index=index)
def stock_report_func(*args):
asset = args[0]
price = asset[0]
currency = asset.name[0].lower()
symbol = asset.name[3]
account = asset.name[1]
settore = asset.name[2]
            # this if adjusts the English (Yahoo) prices, which are quoted in pence
if currency == 'sterline':
                # asset.name is the variable used to access the row index/indices,
                # and the indices depend on the groupby at the bottom
price = asset[0] / 100.0
#print (str(price)+''+str(asset[1])+''+str(type(price))+''+str(type(asset[1])))
flash = price * asset[1]
            # this function computes the valuation of the accounts; it is
            # temporary
self.val_gen(symbol, account, flash, settore)
status = flash - asset[2]
            if np.isnan(asset[3]) == False:  # this adds the sales to status
status += asset[3]
            if np.isnan(asset[4]) == False:  # this adds the dividends to status
status += asset[4]
report = [asset[2], asset[3], asset[4], price, flash, status]
index = ['investito', 'venduto', 'dividendi',
'prezzo', 'flash', 'status']
return Series(report, index=index)
# pylint:enable = E1101
aggregated_stock = self.assets['stock'].apply(
stock_agg_func, axis=1).groupby(['valuta', 'conto', 'settore',
'simbolo']).sum()
joined_stock = self.updated_stock_values.join(
aggregated_stock, how='inner')
self.stock_report = joined_stock.apply(stock_report_func, axis=1)
self.active_stock_report = self.stock_report.query('flash!=0.0')
def bond_report_gen(self):
def bond_agg_func(*args):
asset = self.get_inizialized_values(
'bond', external=False, asset=args[0])
#print (asset)
coupon = asset['cedola-dividendo']
            # handle the dates
date_formating = lambda y: (
list(map((lambda x: int(x)), y.split('-'))))
issue_date = dt.date(*date_formating(asset['data_emissione']))
refund_date = dt.date(*date_formating(asset['data_rimborso']))
transfer_date = dt.date(
                *date_formating(asset['data']))  # value date
first_coupon_date = dt.date(
transfer_date.year, issue_date.month, issue_date.day)
# print(first_coupon_date)
if first_coupon_date > transfer_date:
first_coupon_date = dt.date(
first_coupon_date.year - 1, issue_date.month, issue_date.day)
if asset['tipologia'] == 'annuale':
days_to_coupon = (transfer_date - first_coupon_date).days
first_next_coupon = dt.date(
first_coupon_date.year + 1, first_coupon_date.month, first_coupon_date.day)
days_between_coupon = (
first_next_coupon - first_coupon_date).days
# print(days_to_coupon)
# print(days_between_coupon)
else:
month_second_coupon = first_coupon_date.month + 6
year_second_coupon = first_coupon_date.year
if month_second_coupon > 12:
month_second_coupon -= 12
year_second_coupon += 1
second_coupon_date = dt.date(
year_second_coupon, month_second_coupon, first_coupon_date.day)
#print([first_coupon_date, second_coupon_date])
if second_coupon_date > transfer_date:
days_to_coupon = (transfer_date - first_coupon_date).days
else:
days_to_coupon = (transfer_date - second_coupon_date).days
# print(days_to_coupon)
if transfer_date > second_coupon_date:
first_next_coupon = dt.date(
first_coupon_date.year + 1, first_coupon_date.month, first_coupon_date.day)
days_between_coupon = (
first_next_coupon - second_coupon_date).days
else:
days_between_coupon = (
second_coupon_date - first_coupon_date).days
# print(days_between_coupon)
coupon /= 2
days_from_issue_date = (transfer_date - issue_date).days
bond_life = (refund_date - issue_date).days
# print(days_from_issue_date)
# print(bond_life)
price_r = 100.0
price_i = asset['prezzo_emissione']
price_t = asset['prezzo_acquisto-vendita']
quantity = asset['quantità']
commission = asset['commissione']
tax = asset['tasse']
            # outright purchase amount
x = quantity * (price_t / 100)
            # coupon accrued at purchase
y = ((coupon * days_to_coupon) / days_between_coupon) * quantity
            # withholding tax on the coupon accrued at purchase
z = y * tax
            # accrued issue discount
k = (((price_r - price_i) * days_from_issue_date) /
bond_life) * (quantity / 100)
            # withholding tax on the issue discount
h = k * tax
            # investment formula (TODO: handle a negative issue discount)
price_purchase = x + y - z - h + commission
#print('{0} {1} {2} {3} {4}'.format(x, y, z, k, price_purchase))
return Series({'conto': asset['conto'], 'simbolo': asset['simbolo'], 'prezzo': price_purchase})
self.bond_report = self.assets['bond'].apply(
bond_agg_func, axis=1).groupby(['conto', 'simbolo']).sum()
    def get_exchange_rate(self):  # temporary
usd = self.updated_currency_values.at['eurusd=x', 'last']
        gbp = self.updated_currency_values.at['eurgbp=x', 'last']  # exchange rates
chf = self.updated_currency_values.at['eurchf=x', 'last']
return {'Dollari': usd, 'Sterline': gbp, 'Franchi Svizzeri': chf}
def get_single_stock_report(self, selection):
stock_report_copy = self.stock_report.copy()
unstacked_report = stock_report_copy.reset_index(
level=['valuta', 'conto', 'settore'], drop=True).reset_index(
level=['simbolo'])
df = unstacked_report.groupby(['simbolo']).sum()
try:
ris = df.loc[selection]
ris['prezzo'] = self.updated_stock_values.at[selection, 'last']
except KeyError:
return 'simbolo non presente'
return ris.to_dict()
    def val_gen(self, symbol, account, flash, settore):  # temporary
        '''Populate the dictionary that holds the valuation of the accounts'''
is_italian = False
if symbol[len(symbol) - 3:].lower() == '.mi':
is_italian = True
elif symbol[len(symbol) - 2:].lower() == '.l' and is_italian == False:
            # this extracts the (float) value of the exchange rate
cambio = self.updated_currency_values.at['eurgbp=x', 'last']
flash = flash / cambio
elif symbol[len(symbol) - 3:].lower() == '.vx' and is_italian == False:
cambio = self.updated_currency_values.at['eurchf=x', 'last']
flash = flash / cambio
elif is_italian == False:
cambio = self.updated_currency_values.at['eurusd=x', 'last']
flash = flash / cambio
if account not in self.stock_val_account:
self.stock_val_account[account] = flash
else:
self.stock_val_account[account] += flash
if settore not in self.stock_val_sector:
self.stock_val_sector[settore] = flash
else:
self.stock_val_sector[settore] += flash
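# ---------------------------------------------------------------------------
# Editor's note: the block below is an illustrative usage sketch and is not
# part of the original module. It assumes the 'assets(template)' folder and the
# cached stock_value.csv / currency_value.csv files are present, and that Yahoo
# Finance is reachable (otherwise the cached values are loaded, as handled above).
if __name__ == '__main__':
    portfolio = Risparmi(directory='fabio')   # loads the CSVs and builds the reports
    print(portfolio.stock_report.head())      # per account/sector/currency stock report
    print(portfolio.bond_report.head())       # aggregated bond purchase prices
    print(portfolio.stock_val_account)        # valuation of each account (in euro)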
| mit |
Ziqi-Li/bknqgis | bokeh/bokeh/core/compat/bokeh_renderer.py | 6 | 21121 | "Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import warnings
import matplotlib as mpl
import numpy as np
from six import string_types
from ...layouts import gridplot
from ...models import (ColumnDataSource, FactorRange, DataRange1d, DatetimeAxis, GlyphRenderer,
Grid, LinearAxis, Plot, CategoricalAxis, Legend, LegendItem)
from ...models.glyphs import (Asterisk, Circle, Cross, Diamond, InvertedTriangle,
Line, MultiLine, Patches, Square, Text, Triangle, X)
from ...plotting import DEFAULT_TOOLS
from ...plotting.helpers import _process_tools_arg
from ...util.dependencies import import_optional
from ..properties import value
from .mplexporter.renderers import Renderer
from .mpl_helpers import convert_color, convert_dashes, get_props_cycled, xkcd_line
pd = import_optional('pandas')
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class BokehRenderer(Renderer):
def __init__(self, tools, use_pd, xkcd):
"Initial setup."
self.fig = None
self.use_pd = use_pd
self.tools = tools
self.xkcd = xkcd
self.zorder = {}
self.handles = {}
def open_figure(self, fig, props):
"Get the main plot properties and create the plot."
self.width = int(props['figwidth'] * props['dpi'])
self.height = int(props['figheight'] * props['dpi'])
self.plot = Plot(x_range=DataRange1d(),
y_range=DataRange1d(),
plot_width=self.width,
plot_height=self.height)
def close_figure(self, fig):
"Complete the plot: add tools."
# Add tools
tool_objs, tools_map = _process_tools_arg(self.plot, self.tools)
self.plot.add_tools(*tool_objs)
# Simple or Grid plot setup
if len(fig.axes) <= 1:
self.fig = self.plot
self.plot.renderers.sort(key=lambda x: self.zorder.get(x._id, 0))
else:
            # Split the plot.renderers list at the "marker" points, producing
            # small sublists corresponding to each subplot.
subrends = []
for i in range(1, len(self._axes)):
start, end = self._axes[i-1], self._axes[i]
subrends += [self.plot.renderers[start:end]]
plots = []
for i, axes in enumerate(fig.axes):
# create a new plot for each subplot
_plot = Plot(x_range=self.plot.x_range,
y_range=self.plot.y_range,
plot_width=self.width,
plot_height=self.height)
_plot.title.text = ''
# and add new tools
_tool_objs, _tool_map = _process_tools_arg(_plot, DEFAULT_TOOLS)
_plot.add_tools(*_tool_objs)
# clean the plot ref from axis and grids
_plot_rends = subrends[i]
for r in _plot_rends:
if not isinstance(r, GlyphRenderer):
r.plot = None
# add all the renderers into the new subplot
for r in _plot_rends:
if isinstance(r, GlyphRenderer):
_plot.renderers.append(r)
elif isinstance(r, Grid):
_plot.add_layout(r)
else:
if r in self.plot.below:
_plot.add_layout(r, 'below')
elif r in self.plot.above:
_plot.add_layout(r, 'above')
elif r in self.plot.left:
_plot.add_layout(r, 'left')
elif r in self.plot.right:
_plot.add_layout(r, 'right')
_plot.renderers.sort(key=lambda x: self.zorder.get(x._id, 0))
plots.append(_plot)
(a, b, c) = fig.axes[0].get_geometry()
p = np.array(plots)
n = np.resize(p, (a, b))
grid = gridplot(n.tolist())
self.fig = grid
def open_axes(self, ax, props):
"Get axes data and create the axes and grids"
# Get axes, title and grid into class attributes.
self.ax = ax
self.plot.title.text = ax.get_title()
# to avoid title conversion by draw_text later
#Make sure that all information about the axes are passed to the properties
if props.get('xscale', False):
props['axes'][0]['scale'] = props['xscale']
if props.get('yscale', False):
props['axes'][1]['scale'] = props['yscale']
# Add axis
for props in props['axes']:
if props['position'] == "bottom" : location, dim, thing = "below", 0, ax.xaxis
elif props['position'] == "top" : location, dim, thing = "above", 0, ax.xaxis
else: location, dim, thing = props['position'], 1, ax.yaxis
baxis = self.make_axis(thing, location, props)
if dim==0:
gridlines = ax.get_xgridlines()
else:
gridlines = ax.get_ygridlines()
if gridlines:
self.make_grid(baxis, dim, gridlines[0])
def close_axes(self, ax):
"Complete the axes adding axes-dependent plot props"
if hasattr(ax, 'get_facecolor'):
background_fill_color = convert_color(ax.get_facecolor())
else:
background_fill_color = convert_color(ax.get_axis_bgcolor())
self.plot.background_fill_color = background_fill_color
if self.xkcd:
self.plot.title.text_font = "Comic Sans MS, Textile, cursive"
self.plot.title.text_font_style = "bold"
self.plot.title.text_color = "black"
# Add a "marker" Glyph to help the plot.renderers splitting in the GridPlot build
self._axes = getattr(self, "_axes", [0])
self._axes.append(len(self.plot.renderers))
def open_legend(self, legend, props):
lgnd = Legend(location="top_right")
try:
for label, obj in zip(props['labels'], props['handles']):
lgnd.items.append(LegendItem(label=value(label), renderers=[self.handles[id(obj)]]))
self.plot.add_layout(lgnd)
except KeyError:
pass
def close_legend(self, legend):
pass
def draw_line(self, data, coordinates, style, label, mplobj=None):
"Given a mpl line2d instance create a Bokeh Line glyph."
_x = data[:, 0]
if pd and self.use_pd:
try:
x = [pd.Period(ordinal=int(i), freq=self.ax.xaxis.freq).to_timestamp() for i in _x]
except AttributeError: # we probably can make this one more intelligent later
x = _x
else:
x = _x
y = data[:, 1]
if self.xkcd:
x, y = xkcd_line(x, y)
line = Line()
source = ColumnDataSource()
line.x = source.add(x)
line.y = source.add(y)
line.line_color = convert_color(style['color'])
line.line_width = style['linewidth']
line.line_alpha = style['alpha']
        line.line_dash = [] if style['dasharray'] == "none" else [int(i) for i in style['dasharray'].split(",")] # str2list(int)
# line.line_join = line2d.get_solid_joinstyle() # not in mplexporter
# line.line_cap = cap_style_map[line2d.get_solid_capstyle()] # not in mplexporter
if self.xkcd:
line.line_width = 3
r = self.plot.add_glyph(source, line)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"Given a mpl line2d instance create a Bokeh Marker glyph."
x = data[:, 0]
y = data[:, 1]
marker_map = {
".": Circle,
"o": Circle,
"s": Square,
"+": Cross,
"^": Triangle,
"v": InvertedTriangle,
"x": X,
"d": Diamond,
"D": Diamond,
"*": Asterisk,
}
# Not all matplotlib markers are currently handled; fall back to Circle if we encounter an
# unhandled marker. See http://matplotlib.org/api/markers_api.html for a list of markers.
try:
marker = marker_map[style['marker']]()
except KeyError:
warnings.warn("Unable to handle marker: %s; defaulting to Circle" % style['marker'])
marker = Circle()
source = ColumnDataSource()
marker.x = source.add(x)
marker.y = source.add(y)
marker.line_color = convert_color(style['edgecolor'])
marker.fill_color = convert_color(style['facecolor'])
marker.line_width = style['edgewidth']
marker.size = style['markersize']
marker.fill_alpha = marker.line_alpha = style['alpha']
r = self.plot.add_glyph(source, marker)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
warnings.warn("Path drawing has performance issues, please use mpl PathCollection instead")
pass
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"Given a mpl PathCollection instance create a Bokeh Marker glyph."
x = offsets[:, 0]
y = offsets[:, 1]
style = styles
warnings.warn("Path marker shapes currently not handled, defaulting to Circle")
marker = Circle()
source = ColumnDataSource()
marker.x = source.add(x)
marker.y = source.add(y)
if len(style['facecolor']) > 1:
fill_color = []
for color in style['facecolor']:
# Apparently there is an issue with ColumnDataSources and rgb/a tuples, converting to hex
fill_color.append('#%02x%02x%02x' % convert_color(tuple(map(tuple,[color]))[0]))
marker.fill_color = source.add(fill_color)
else:
marker.fill_color = convert_color(tuple(map(tuple,style['facecolor']))[0])
if len(style['edgecolor']) > 1:
edge_color = []
for color in style['edgecolor']:
# Apparently there is an issue with ColumnDataSources, line_color, and rgb/a tuples, converting to hex
edge_color.append('#%02x%02x%02x' % convert_color(tuple(map(tuple,[color]))[0]))
marker.line_color = source.add(edge_color)
else:
marker.line_color = convert_color(tuple(map(tuple,style['edgecolor']))[0])
if len(style['linewidth']) > 1:
line_width = []
for width in style['linewidth']:
line_width.append(width)
marker.line_width = source.add(line_width)
else:
marker.line_width = style['linewidth'][0]
if len(mplobj.get_axes().collections) > 1:
warnings.warn("Path marker sizes support is limited and may not display as expected")
marker.size = mplobj.get_sizes()[0]/mplobj.get_axes().collections[-1].get_sizes()[0]*20
else:
marker.size = 5
marker.fill_alpha = marker.line_alpha = style['alpha']
r = self.plot.add_glyph(source, marker)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"Given a mpl text instance create a Bokeh Text glyph."
# mpl give you the title and axes names as a text object (with specific locations)
# inside the plot itself. That does not make sense inside Bokeh, so we
# just skip the title and axes names from the conversion and covert any other text.
if text_type in ['xlabel', 'ylabel', 'title']:
return
if coordinates != 'data':
return
x, y = position
text = Text(x=x, y=y, text=[text])
alignment_map = {"center": "middle", "top": "top", "bottom": "bottom", "baseline": "bottom"}
# baseline not implemented in Bokeh, defaulting to bottom.
text.text_alpha = style['alpha']
text.text_font_size = "%dpx" % style['fontsize']
text.text_color = convert_color(style['color'])
text.text_align = style['halign']
text.text_baseline = alignment_map[style['valign']]
text.angle = style['rotation']
## Using get_fontname() works, but it's oftentimes not available in the browser,
## so it's better to just use the font family here.
#text.text_font = mplText.get_fontname()) not in mplexporter
#text.text_font = mplText.get_fontfamily()[0] # not in mplexporter
#text.text_font_style = fontstyle_map[mplText.get_fontstyle()] # not in mplexporter
## we don't really have the full range of font weights, but at least handle bold
#if mplText.get_weight() in ("bold", "heavy"):
#text.text_font_style = bold
source = ColumnDataSource()
r = self.plot.add_glyph(source, text)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
pass
def make_axis(self, ax, location, props):
"Given a mpl axes instance, returns a Bokeh LinearAxis object."
# TODO:
# * handle log scaling
# * map `labelpad` to `major_label_standoff`
# * deal with minor ticks once BokehJS supports them
# * handle custom tick locations once that is added to bokehJS
tf = props['tickformat']
tv = props['tickvalues']
if tf and any(isinstance(x, string_types) for x in tf):
laxis = CategoricalAxis(axis_label=ax.get_label_text())
            assert np.min(tv) >= 0, "Assuming categorical axes have positive-integer dummy tick values"
            # Seaborn positions its categories on dummy tick values starting from zero,
            # while Matplotlib starts from 1, so the offset depends on the assumed indexing.
offset = np.min(tv) - 1
rng = FactorRange(factors=[str(x) for x in tf], offset=offset)
if location in ["above", "below"]:
self.plot.x_range = rng
else:
self.plot.y_range = rng
else:
if props['scale'] == "linear":
laxis = LinearAxis(axis_label=ax.get_label_text())
elif props['scale'] == "date":
laxis = DatetimeAxis(axis_label=ax.get_label_text())
self.plot.add_layout(laxis, location)
# First get the label properties by getting an mpl.Text object
label = ax.get_label()
self.text_props(label, laxis, prefix="axis_label_")
# Set the tick properties (for now just turn off if necessary)
# TODO: mirror tick properties
if props['nticks'] == 0:
laxis.major_tick_line_color = None
laxis.minor_tick_line_color = None
laxis.major_label_text_color = None
# To get the tick label format, we look at the first of the tick labels
# and assume the rest are formatted similarly.
ticklabels = ax.get_ticklabels()
if ticklabels:
self.text_props(ticklabels[0], laxis, prefix="major_label_")
#newaxis.bounds = axis.get_data_interval() # I think this is the right func...
if self.xkcd:
laxis.axis_line_width = 3
laxis.axis_label_text_font = "Comic Sans MS, Textile, cursive"
laxis.axis_label_text_font_style = "bold"
laxis.axis_label_text_color = "black"
laxis.major_label_text_font = "Comic Sans MS, Textile, cursive"
laxis.major_label_text_font_style = "bold"
laxis.major_label_text_color = "black"
return laxis
def make_grid(self, baxis, dimension, gridline):
"Given a mpl axes instance, returns a Bokeh Grid object."
lgrid = Grid(dimension=dimension,
ticker=baxis.ticker,
grid_line_color=convert_color(gridline.get_color()),
grid_line_width=gridline.get_linewidth())
self.plot.add_layout(lgrid)
def make_line_collection(self, col):
"Given a mpl collection instance create a Bokeh MultiLine glyph."
xydata = col.get_segments()
t_xydata = [np.transpose(seg) for seg in xydata]
xs = [t_xydata[x][0] for x in range(len(t_xydata))]
ys = [t_xydata[x][1] for x in range(len(t_xydata))]
if self.xkcd:
xkcd_xs = [xkcd_line(xs[i], ys[i])[0] for i in range(len(xs))]
xkcd_ys = [xkcd_line(xs[i], ys[i])[1] for i in range(len(ys))]
xs = xkcd_xs
ys = xkcd_ys
multiline = MultiLine()
source = ColumnDataSource()
multiline.xs = source.add(xs)
multiline.ys = source.add(ys)
self.multiline_props(source, multiline, col)
r = self.plot.add_glyph(source, multiline)
self.zorder[r._id] = col.zorder
self.handles[id(col)] = r
def make_poly_collection(self, col):
"Given a mpl collection instance create a Bokeh Patches glyph."
xs = []
ys = []
for path in col.get_paths():
for sub_poly in path.to_polygons():
xx, yy = sub_poly.transpose()
xs.append(xx)
ys.append(yy)
patches = Patches()
source = ColumnDataSource()
patches.xs = source.add(xs)
patches.ys = source.add(ys)
self.patches_props(source, patches, col)
r = self.plot.add_glyph(source, patches)
self.zorder[r._id] = col.zorder
self.handles[id(col)] = r
def multiline_props(self, source, multiline, col):
"Takes a mpl collection object to extract and set up some Bokeh multiline properties."
colors = get_props_cycled(col, col.get_colors(), fx=lambda x: mpl.colors.rgb2hex(x))
colors = [convert_color(x) for x in colors]
widths = get_props_cycled(col, col.get_linewidth())
multiline.line_color = source.add(colors)
multiline.line_width = source.add(widths)
if col.get_alpha() is not None:
multiline.line_alpha = col.get_alpha()
offset = col.get_linestyle()[0][0]
if not col.get_linestyle()[0][1]:
on_off = []
else:
on_off = map(int,col.get_linestyle()[0][1])
multiline.line_dash_offset = convert_dashes(offset)
multiline.line_dash = list(convert_dashes(tuple(on_off)))
def patches_props(self, source, patches, col):
"Takes a mpl collection object to extract and set up some Bokeh patches properties."
face_colors = get_props_cycled(col, col.get_facecolors(), fx=lambda x: mpl.colors.rgb2hex(x))
face_colors = [convert_color(x) for x in face_colors]
patches.fill_color = source.add(face_colors)
edge_colors = get_props_cycled(col, col.get_edgecolors(), fx=lambda x: mpl.colors.rgb2hex(x))
edge_colors = [convert_color(x) for x in edge_colors]
patches.line_color = source.add(edge_colors)
widths = get_props_cycled(col, col.get_linewidth())
patches.line_width = source.add(widths)
if col.get_alpha() is not None:
patches.line_alpha = col.get_alpha()
patches.fill_alpha = col.get_alpha()
offset = col.get_linestyle()[0][0]
if not col.get_linestyle()[0][1]:
on_off = []
else:
on_off = map(int,col.get_linestyle()[0][1])
patches.line_dash_offset = convert_dashes(offset)
patches.line_dash = list(convert_dashes(tuple(on_off)))
def text_props(self, text, obj, prefix=""):
fp = text.get_font_properties()
setattr(obj, prefix+"text_font", fp.get_family()[0])
setattr(obj, prefix+"text_font_size", "%fpt" % fp.get_size_in_points())
setattr(obj, prefix+"text_font_style", fp.get_style())
| gpl-2.0 |
RJT1990/pyflux | pyflux/ensembles/mixture_of_experts.py | 1 | 12285 | import pandas as pd
import numpy as np
class Aggregate():
""" Aggregation Algorithm
Parameters
----------
learning_rate : float
Learning rate for the Aggregation algorithm
loss_type : string
'absolute' or 'squared'
match_window : int
        (default: 10) number of observations at the end of each model's time
        series that are compared to check that every model uses the same data
"""
def __init__(self, learning_rate=1.0, loss_type='absolute', match_window=10):
self.learning_rate = learning_rate
self.data = []
self.model_list = []
self.model_names = []
self.match_window = match_window
self.model_predictions_is = []
self.model_predictions = []
if loss_type == 'absolute':
self.loss_type = self.absolute_loss
self.loss_name = 'Absolute Loss'
elif loss_type == 'squared':
self.loss_type = self.squared_loss
self.loss_name = 'Squared Loss'
else:
raise ValueError('Unidentified loss type entered!')
self.supported_models = ['EGARCH', 'EGARCHM', 'EGARCHMReg', 'GARCH', 'LMEGARCH', 'LMSEGARCH', 'SEGARCH', 'SEGARCHM', 'GAS', 'ARIMA', 'ARIMAX', 'GASLLEV', 'GASLLT', 'GASReg', 'GASX', 'GPNARX', 'LLEV', 'LLT', 'DynReg']
@staticmethod
def absolute_loss(data, predictions):
""" Calculates absolute loss
Parameters
----------
data : np.ndarray
Univariate data
predictions : np.ndarray
Univariate predictions
Returns
----------
- np.ndarray of the absolute loss
"""
return np.abs(data-predictions)
@staticmethod
def squared_loss(data, predictions):
""" Calculates squared loss
Parameters
----------
data : np.ndarray
Univariate data
predictions : np.ndarray
Univariate predictions
Returns
----------
- np.ndarray of the squared loss
"""
return np.square(data-predictions)
def add_model(self, model):
""" Adds a PyFlux model to the aggregating algorithm
Parameters
----------
model : pf.[MODEL]
A PyFlux univariate model
Returns
----------
- Void (changes self.model_list)
"""
if model.model_type not in self.supported_models:
raise ValueError('Model type not supported for Aggregate! Apologies')
if not self.model_list:
self.model_list.append(model)
if model.model_type in ['EGARCH', 'EGARCHM', 'EGARCHMReg', 'GARCH', 'LMEGARCH', 'LMSEGARCH', 'SEGARCH', 'SEGARCHM']:
self.data = np.abs(model.data)
else:
self.data = model.data
self.index = model.index
else:
if model.model_type in ['EGARCH', 'EGARCHM', 'EGARCHMReg', 'GARCH', 'LMEGARCH', 'LMSEGARCH', 'SEGARCH', 'SEGARCHM']:
if np.isclose(np.abs(np.abs(model.data[-self.match_window:])-self.data[-self.match_window:]).sum(),0.0) or model.model_type=='GPNARX':
self.model_list.append(model)
else:
                    raise ValueError('Data entered is deemed different based on the last %s values!' % (self.match_window))
else:
if np.isclose(np.abs(model.data[-self.match_window:]-self.data[-self.match_window:]).sum(),0.0) or model.model_type=='GPNARX':
self.model_list.append(model)
else:
                    raise ValueError('Data entered is deemed different based on the last %s values!' % (self.match_window))
self.model_names = [i.model_name for i in self.model_list]
def _model_predict(self, h, recalculate=False, fit_once=True):
""" Outputs ensemble model predictions for out-of-sample data
Parameters
----------
h : int
            How many steps ahead to forecast with each constituent model
recalculate: boolean
Whether to recalculate the predictions or not
fit_once : boolean
Whether to fit the model once at the beginning, or with every iteration
Returns
----------
- pd.DataFrame of the model predictions, index of dates
"""
if len(self.model_predictions) == 0 or h != self.h or recalculate is True:
for no, model in enumerate(self.model_list):
if no == 0:
model.fit()
result = model.predict(h)
self.predict_index = result.index
result.columns = [model.model_name]
else:
model.fit()
new_frame = model.predict(h)
new_frame.columns = [model.model_name]
result = pd.concat([result,new_frame], axis=1)
self.model_predictions = result
self.h = h
return result, self.predict_index
else:
return self.model_predictions, self.predict_index
def _model_predict_is(self, h, recalculate=False, fit_once=True):
""" Outputs ensemble model predictions for the end-of-period data
Parameters
----------
h : int
How many steps at the end of the series to run the ensemble on
recalculate: boolean
Whether to recalculate the predictions or not
fit_once : boolean
Whether to fit the model once at the beginning, or with every iteration
Returns
----------
- pd.DataFrame of the model predictions, index of dates
"""
if len(self.model_predictions_is) == 0 or h != self.h or recalculate is True:
for no, model in enumerate(self.model_list):
if no == 0:
result = model.predict_is(h, fit_once=fit_once)
result.columns = [model.model_name]
else:
new_frame = model.predict_is(h, fit_once=fit_once)
new_frame.columns = [model.model_name]
result = pd.concat([result,new_frame], axis=1)
self.model_predictions_is = result
self.h = h
return result
else:
return self.model_predictions_is
def _construct_losses(self, data, predictions, ensemble_prediction):
""" Construct losses for the ensemble and each constitute model
Parameters
----------
data: np.ndarray
The univariate time series
predictions : np.ndarray
            The predictions of each constituent model
ensemble_prediction : np.ndarray
The prediction of the ensemble model
Returns
----------
- np.ndarray of the losses for each model
"""
losses = []
losses.append(self.loss_type(data, ensemble_prediction).sum()/data.shape[0])
for model in range(len(self.model_list)):
losses.append(self.loss_type(data, predictions[:,model]).sum()/data.shape[0])
return losses
def tune_learning_rate(self, h, parameter_list=None):
""" Naive tuning of the the learning rate on the in-sample data
Parameters
----------
h : int
How many steps to run Aggregate on
parameter_list: list
List of parameters to search for a good learning rate over
Returns
----------
- Void (changes self.learning_rate)
"""
if parameter_list is None:
parameter_list = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0, 10000.0,100000.0]
for parameter in parameter_list:
self.learning_rate = parameter
_, losses, _ = self.run(h, recalculate=False)
loss = losses[0]
if parameter == parameter_list[0]:
best_rate = parameter
best_loss = loss
else:
if loss < best_loss:
best_loss = loss
best_rate = parameter
self.learning_rate = best_rate
def run(self, h, recalculate=False):
""" Run the aggregating algorithm
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
recalculate: boolean
Whether to recalculate the predictions or not
Returns
----------
- np.ndarray of normalized weights, np.ndarray of losses for each model
"""
data = self.data[-h:]
predictions = self._model_predict_is(h, recalculate=recalculate).values
weights = np.zeros((h, len(self.model_list)))
normalized_weights = np.zeros((h, len(self.model_list)))
ensemble_prediction = np.zeros(h)
for t in range(h):
if t == 0:
weights[t,:] = 100000
ensemble_prediction[t] = np.dot(weights[t,:]/weights[t,:].sum(), predictions[t,:])
weights[t,:] = weights[t,:]*np.exp(-self.learning_rate*self.loss_type(data[t], predictions[t,:]))
normalized_weights[t,:] = weights[t,:]/weights[t,:].sum()
else:
ensemble_prediction[t] = np.dot(weights[t-1,:]/weights[t-1,:].sum(), predictions[t,:])
weights[t,:] = weights[t-1,:]*np.exp(-self.learning_rate*self.loss_type(data[t], predictions[t,:]))
normalized_weights[t,:] = weights[t,:]/weights[t,:].sum()
return normalized_weights, self._construct_losses(data, predictions, ensemble_prediction), ensemble_prediction
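    # Editor's note (added comment, not in the original source): the loop above
    # implements the exponentially weighted average forecaster ("Hedge"): the
    # ensemble forecast is the weighted mean of the constituent forecasts under
    # the previous normalized weights, and each weight is then damped by
    # exp(-learning_rate * loss) at every step.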
def plot_weights(self, h, **kwargs):
""" Plot the weights from the aggregating algorithm
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
Returns
----------
- A plot of the weights for each model constituent over time
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
weights, _, _ = self.run(h=h)
plt.figure(figsize=figsize)
plt.plot(self.index[-h:],weights)
plt.legend(self.model_names)
plt.show()
def predict(self, h, h_train=40):
""" Run out-of-sample predicitons for Aggregate algorithm
(This only works for non-exogenous variable models currently)
Parameters
----------
h : int
How many out-of-sample steps to run the aggregating algorithm on
h_train : int
How many in-sample steps to warm-up the ensemble weights on
Returns
----------
- pd.DataFrame of Aggregate out-of-sample predictions
"""
predictions, index = self._model_predict(h)
normalized_weights = self.run(h=h_train)[0][-1, :]
ensemble_prediction = np.zeros(h)
for t in range(h):
ensemble_prediction[t] = np.dot(normalized_weights, predictions.values[t,:])
result = pd.DataFrame(ensemble_prediction)
result.index = index
return result
def predict_is(self, h):
""" Outputs predictions for the Aggregate algorithm on the in-sample data
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
Returns
----------
- pd.DataFrame of ensemble predictions
"""
result = pd.DataFrame([self.run(h=h)[2]]).T
result.index = self.index[-h:]
return result
def summary(self, h):
"""
Summarize the results for each model for h steps of the algorithm
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
Returns
----------
- pd.DataFrame of losses for each model
"""
_, losses, _ = self.run(h=h)
df = pd.DataFrame(losses)
df.index = ['Ensemble'] + self.model_names
df.columns = [self.loss_name]
return df
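# ---------------------------------------------------------------------------
# Editor's note: the block below is an illustrative usage sketch, not part of
# the original module. It assumes pyflux is installed and that pf.ARIMA exposes
# the model_type/fit/predict_is interface used by Aggregate above; the synthetic
# series is only there to make the sketch self-contained.
if __name__ == '__main__':
    import pyflux as pf
    rng = np.random.RandomState(0)
    series = pd.DataFrame({'y': rng.normal(size=300).cumsum()})
    ensemble = Aggregate(learning_rate=1.0, loss_type='absolute')
    ensemble.add_model(pf.ARIMA(data=series, ar=1, ma=0, target='y'))
    ensemble.add_model(pf.ARIMA(data=series, ar=2, ma=1, target='y'))
    ensemble.tune_learning_rate(h=20)         # naive search over candidate learning rates
    print(ensemble.summary(h=20))             # loss of the ensemble vs. each constituent model
    print(ensemble.predict(h=5, h_train=20))  # out-of-sample ensemble forecast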
| bsd-3-clause |
lorganthesorn/CryptoArb | Analysis/Stats.py | 1 | 1747 | import numpy as np
import pandas as pd
from scipy import stats
from matplotlib import pyplot as plt
from statsmodels.tsa.stattools import grangercausalitytests
from GetHistory import CrpytoCompare
def granger_test(base='BTC', quote='USD', exchange1='Bitfinex', exchange2='BitTrex'):
df_ex1 = CrpytoCompare.crypto_close(base, quote, exchange1)
df_ex2 = CrpytoCompare.crypto_close(base, quote, exchange2)
df = pd.concat([df_ex1.diff(), df_ex2.diff()], axis=1, join='inner')
df.dropna(inplace=True)
#print(df)
print(grangercausalitytests(df, 5))
def correlation(df):
df1 = df.dropna().drop_duplicates()
dfP = df1.dropna(axis=1, how='any')
m = dfP.mean(axis=0)
s = dfP.std(ddof=1, axis=0)
# normalised time-series as an input for PCA
dfPort = (dfP - m)/s
c = np.cov(dfPort.values.T) # covariance matrix
co = np.corrcoef(dfP.values.T) # correlation matrix
tickers = list(dfP.columns)
plt.figure(figsize=(8,8))
plt.imshow(co, cmap="RdGy", interpolation="nearest")
cb = plt.colorbar()
cb.set_label("Correlation Matrix Coefficients")
plt.title("Correlation Matrix", fontsize=14)
plt.xticks(np.arange(len(tickers)), tickers, rotation=90)
plt.yticks(np.arange(len(tickers)), tickers)
# perform PCA
#w, v = np.linalg.eig(c)
#ax = plt.figure(figsize=(8,8)).gca()
#plt.imshow(v, cmap="bwr", interpolation="nearest")
#cb = plt.colorbar()
#plt.yticks(np.arange(len(tickers)), tickers)
#plt.xlabel("PC Number")
#plt.title("PCA", fontsize=14)
# force x-tickers to be displayed as integers (not floats)
#ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.show()
if __name__ == '__main__':
granger_test() | mit |
wkfwkf/statsmodels | statsmodels/discrete/tests/test_constrained.py | 26 | 19635 | # -*- coding: utf-8 -*-
"""
Created on Fri May 30 16:22:29 2014
Author: Josef Perktold
License: BSD-3
"""
from statsmodels.compat.python import StringIO
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_
from nose import SkipTest
import pandas as pd
import patsy
from statsmodels.discrete.discrete_model import Poisson
from statsmodels.discrete.discrete_model import Logit
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.base._constraints import fit_constrained
from statsmodels.tools.tools import add_constant
from statsmodels import datasets
spector_data = datasets.spector.load()
spector_data.exog = add_constant(spector_data.exog, prepend=False)
from .results import results_poisson_constrained as results
from .results import results_glm_logit_constrained as reslogit
DEBUG = False
ss='''\
agecat smokes deaths pyears
1 1 32 52407
2 1 104 43248
3 1 206 28612
4 1 186 12663
5 1 102 5317
1 0 2 18790
2 0 12 10673
3 0 28 5710
4 0 28 2585
5 0 31 1462'''
data = pd.read_csv(StringIO(ss), delimiter='\t')
data['logpyears'] = np.log(data['pyears'])
class CheckPoissonConstrainedMixin(object):
def test_basic(self):
res1 = self.res1
res2 = self.res2
assert_allclose(res1[0], res2.params[self.idx], rtol=1e-6)
# see below Stata has nan, we have zero
bse1 = np.sqrt(np.diag(res1[1]))
mask = (bse1 == 0) & np.isnan(res2.bse[self.idx])
assert_allclose(bse1[~mask], res2.bse[self.idx][~mask], rtol=1e-6)
def test_basic_method(self):
if hasattr(self, 'res1m'):
res1 = (self.res1m if not hasattr(self.res1m, '_results')
else self.res1m._results)
res2 = self.res2
assert_allclose(res1.params, res2.params[self.idx], rtol=1e-6)
# when a parameter is fixed, the Stata has bse=nan, we have bse=0
mask = (res1.bse == 0) & np.isnan(res2.bse[self.idx])
assert_allclose(res1.bse[~mask], res2.bse[self.idx][~mask], rtol=1e-6)
tvalues = res2.params_table[self.idx, 2]
# when a parameter is fixed, the Stata has tvalue=nan, we have tvalue=inf
mask = np.isinf(res1.tvalues) & np.isnan(tvalues)
assert_allclose(res1.tvalues[~mask], tvalues[~mask], rtol=1e-6)
pvalues = res2.params_table[self.idx, 3]
# note most pvalues are very small
# examples so far agree at 8 or more decimal, but rtol is stricter
# see above
mask = (res1.pvalues == 0) & np.isnan(pvalues)
assert_allclose(res1.pvalues[~mask], pvalues[~mask], rtol=5e-5)
ci_low = res2.params_table[self.idx, 4]
ci_upp = res2.params_table[self.idx, 5]
ci = np.column_stack((ci_low, ci_upp))
# note most pvalues are very small
# examples so far agree at 8 or more decimal, but rtol is stricter
# see above: nan versus value
assert_allclose(res1.conf_int()[~np.isnan(ci)], ci[~np.isnan(ci)], rtol=5e-5)
#other
assert_allclose(res1.llf, res2.ll, rtol=1e-6)
assert_equal(res1.df_model, res2.df_m)
# Stata doesn't have df_resid
df_r = res2.N - res2.df_m - 1
assert_equal(res1.df_resid, df_r)
else:
raise SkipTest("not available yet")
def test_other(self):
# some results may not be valid or available for all models
if hasattr(self, 'res1m'):
res1 = self.res1m
res2 = self.res2
if hasattr(res2, 'll_0'):
assert_allclose(res1.llnull, res2.ll_0, rtol=1e-6)
else:
if DEBUG:
import warnings
message = ('test: ll_0 not available, llnull=%6.4F'
% res1.llnull)
warnings.warn(message)
else:
raise SkipTest("not available yet")
class TestPoissonConstrained1a(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_noexposure_constraint
cls.idx = [7, 3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ logpyears + smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data)
#res1a = mod1a.fit()
# get start_params, example fails to converge on one py TravisCI
k_vars = len(mod.exog_names)
start_params = np.zeros(k_vars)
start_params[0] = np.log(mod.endog.mean())
# if we need it, this is desired params
p = np.array([-3.93478643, 1.37276214, 2.33077032, 2.71338891,
2.71338891, 0.57966535, 0.97254074])
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
start_params=start_params,
fit_kwds={'method': 'bfgs',
'disp': 0})
# TODO: Newton fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, start_params=start_params,
method='bfgs', disp=0)
def test_smoke(self):
# trailing text in summary, assumes it's the first extra string
#NOTE: see comment about convergence in llnull for self.res1m
summ = self.res1m.summary()
assert_('linear equality constraints' in summ.extra_txt)
summ = self.res1m.summary2()
assert_('linear equality constraints' in summ.extra_txt[0])
class TestPoissonConstrained1b(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_constraint
#cls.idx = [3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
exposure=data['pyears'].values)
#offset=np.log(data['pyears'].values))
#res1a = mod1a.fit()
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'method': 'newton',
'disp': 0})
cls.constraints = lc
# TODO: bfgs fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, method='newton',
disp=0)
class TestPoissonConstrained1c(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_constraint
#cls.idx = [3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
offset=np.log(data['pyears'].values))
#res1a = mod1a.fit()
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'method': 'newton',
'disp': 0})
cls.constraints = lc
# TODO: bfgs fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, method='newton', disp=0)
class TestPoissonNoConstrained(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_noconstraint
cls.idx = [6, 2, 3, 4, 5, 0] # 1 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
#exposure=data['pyears'].values)
offset=np.log(data['pyears'].values))
res1 = mod.fit(disp=0)._results
# res1 is duplicate check, so we can follow the same pattern
cls.res1 = (res1.params, res1.cov_params())
cls.res1m = res1
class TestPoissonConstrained2a(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_noexposure_constraint2
cls.idx = [7, 3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ logpyears + smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data)
# get start_params, example fails to converge on one py TravisCI
k_vars = len(mod.exog_names)
start_params = np.zeros(k_vars)
start_params[0] = np.log(mod.endog.mean())
# if we need it, this is desired params
p = np.array([-9.43762015, 1.52762442, 2.74155711, 3.58730007,
4.08730007, 1.15987869, 0.12111539])
constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
start_params=start_params,
fit_kwds={'method': 'bfgs', 'disp': 0})
# TODO: Newton fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, start_params=start_params,
method='bfgs', disp=0)
class TestPoissonConstrained2b(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_constraint2
#cls.idx = [3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
exposure=data['pyears'].values)
#offset=np.log(data['pyears'].values))
#res1a = mod1a.fit()
constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'method': 'newton',
'disp': 0})
cls.constraints = lc
# TODO: bfgs fails to converge. overflow somewhere?
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, method='bfgs', disp=0,
start_params=cls.res1[0])
class TestPoissonConstrained2c(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_constraint2
#cls.idx = [3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
offset=np.log(data['pyears'].values))
constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'method':'newton',
'disp': 0})
cls.constraints = lc
# TODO: bfgs fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr,
method='bfgs', disp=0,
start_params=cls.res1[0])
class TestGLMPoissonConstrained1a(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
from statsmodels.base._constraints import fit_constrained
cls.res2 = results.results_noexposure_constraint
cls.idx = [7, 3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ logpyears + smokes + C(agecat)'
mod = GLM.from_formula(formula, data=data,
family=families.Poisson())
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants)
cls.constraints = lc
cls.res1m = mod.fit_constrained(constr)
class TestGLMPoissonConstrained1b(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.base._constraints import fit_constrained
cls.res2 = results.results_exposure_constraint
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example with offset
formula = 'deaths ~ smokes + C(agecat)'
mod = GLM.from_formula(formula, data=data,
family=families.Poisson(),
offset=np.log(data['pyears'].values))
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants)
cls.constraints = lc
cls.res1m = mod.fit_constrained(constr)._results
def test_compare_glm_poisson(self):
res1 = self.res1m
res2 = self.res2
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
exposure=data['pyears'].values)
#offset=np.log(data['pyears'].values))
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
res2 = mod.fit_constrained(constr, start_params=self.res1m.params,
method='newton', warn_convergence=False,
disp=0)
# we get high precision because we use the params as start_params
# basic, just as check that we have the same model
assert_allclose(res1.params, res2.params, rtol=1e-12)
assert_allclose(res1.bse, res2.bse, rtol=1e-12)
# check predict, fitted, ...
predicted = res1.predict()
assert_allclose(predicted, res2.predict(), rtol=1e-10)
assert_allclose(res1.mu, predicted, rtol=1e-10)
assert_allclose(res1.fittedvalues, predicted, rtol=1e-10)
assert_allclose(res2.predict(linear=True), res2.predict(linear=True),
rtol=1e-10)
class CheckGLMConstrainedMixin(CheckPoissonConstrainedMixin):
# add tests for some GLM specific attributes
def test_glm(self):
res2 = self.res2 # reference results
res1 = self.res1m
#assert_allclose(res1.aic, res2.aic, rtol=1e-10) # far away
# Stata aic in ereturn and in estat ic are very different
# we have the same as estat ic
# see issue #1733
assert_allclose(res1.aic, res2.infocrit[4], rtol=1e-10)
assert_allclose(res1.bic, res2.bic, rtol=1e-10)
# bic is deviance based
#assert_allclose(res1.bic, res2.infocrit[5], rtol=1e-10)
assert_allclose(res1.deviance, res2.deviance, rtol=1e-10)
# TODO: which chi2 are these
#assert_allclose(res1.pearson_chi2, res2.chi2, rtol=1e-10)
class TestGLMLogitConstrained1(CheckGLMConstrainedMixin):
@classmethod
def setup_class(cls):
cls.idx = slice(None)
# params sequence same as Stata, but Stata reports param = nan
# and we have param = value = 0
#res1ul = Logit(data.endog, data.exog).fit(method="newton", disp=0)
cls.res2 = reslogit.results_constraint1
mod1 = GLM(spector_data.endog, spector_data.exog,
family=families.Binomial())
constr = 'x1 = 2.8'
cls.res1m = mod1.fit_constrained(constr)
R, q = cls.res1m.constraints.coefs, cls.res1m.constraints.constants
cls.res1 = fit_constrained(mod1, R, q)
class TestGLMLogitConstrained2(CheckGLMConstrainedMixin):
@classmethod
def setup_class(cls):
cls.idx = slice(None) # params sequence same as Stata
#res1ul = Logit(data.endog, data.exog).fit(method="newton", disp=0)
cls.res2 = reslogit.results_constraint2
mod1 = GLM(spector_data.endog, spector_data.exog,
family=families.Binomial())
constr = 'x1 - x3 = 0'
cls.res1m = mod1.fit_constrained(constr)
R, q = cls.res1m.constraints.coefs, cls.res1m.constraints.constants
cls.res1 = fit_constrained(mod1, R, q)
cls.constraints_rq = (R, q)
def test_predict(self):
# results only available for this case
res2 = self.res2 # reference results
res1 = self.res1m
predicted = res1.predict()
assert_allclose(predicted, res2.predict_mu, atol=1e-7)
assert_allclose(res1.mu, predicted, rtol=1e-10)
assert_allclose(res1.fittedvalues, predicted, rtol=1e-10)
def test_smoke(self):
# trailing text in summary, assumes it's the first extra string
summ = self.res1m.summary()
assert_('linear equality constraints' in summ.extra_txt)
summ = self.res1m.summary2()
assert_('linear equality constraints' in summ.extra_txt[0])
def test_fit_constrained_wrap(self):
# minimal test
res2 = self.res2 # reference results
from statsmodels.base._constraints import fit_constrained_wrap
res_wrap = fit_constrained_wrap(self.res1m.model, self.constraints_rq)
assert_allclose(res_wrap.params, res2.params, rtol=1e-6)
assert_allclose(res_wrap.params, res2.params, rtol=1e-6)
def junk():
# Singular Matrix in mod1a.fit()
formula1 = 'deaths ~ smokes + C(agecat)'
formula2 = 'deaths ~ C(agecat) + C(smokes) : C(agecat)' # same as Stata default
mod = Poisson.from_formula(formula2, data=data, exposure=data['pyears'].values)
res0 = mod.fit()
constraints = 'C(smokes)[T.1]:C(agecat)[3] = C(smokes)[T.1]:C(agecat)[4]'
import patsy
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constraints)
R, q = lc.coefs, lc.constants
resc = mod.fit_constrained(R,q, fit_kwds={'method':'bfgs'})
# example without offset
formula1a = 'deaths ~ logpyears + smokes + C(agecat)'
mod1a = Poisson.from_formula(formula1a, data=data)
print(mod1a.exog.shape)
res1a = mod1a.fit()
lc_1a = patsy.DesignInfo(mod1a.exog_names).linear_constraint('C(agecat)[T.4] = C(agecat)[T.5]')
resc1a = mod1a.fit_constrained(lc_1a.coefs, lc_1a.constants, fit_kwds={'method':'newton'})
print(resc1a[0])
print(resc1a[1])
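# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch, not part of the original test module
# (kept as comments because this module uses relative imports and is meant to
# be run by the test runner). It restates the constrained-fit pattern that the
# test classes above exercise, reusing the smoking/deaths `data` frame:
#   mod = Poisson.from_formula('deaths ~ smokes + C(agecat)', data=data,
#                              offset=np.log(data['pyears'].values))
#   res = mod.fit_constrained('C(agecat)[T.4] = C(agecat)[T.5]',
#                             method='newton', disp=0)
#   print(res.summary())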
| bsd-3-clause |
joshloyal/scikit-learn | sklearn/cluster/bicluster.py | 26 | 19870 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
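# Editor's note (added comment, not in the original source): in matrix form the
# scaling above computes an[i, j] = X[i, j] / sqrt(row_sum(i) * col_sum(j)),
# i.e. an = R^(-1/2) * X * C^(-1/2) with R and C the diagonal matrices of row
# and column sums; rows or columns that sum to zero get a zero scaling factor.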
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
dist = norm(X_scaled.data - X.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
return self
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
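    Examples
    --------
    A minimal illustrative sketch, not part of the original docstring; the
    random input below is hypothetical.
    >>> import numpy as np
    >>> from sklearn.cluster import SpectralCoclustering
    >>> X = np.random.RandomState(0).rand(20, 15) + 0.1
    >>> model = SpectralCoclustering(n_clusters=3, random_state=0).fit(X)
    >>> model.row_labels_.shape
    (20,)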
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
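    Examples
    --------
    A minimal illustrative sketch, not part of the original docstring; the
    random input below is hypothetical.
    >>> import numpy as np
    >>> from sklearn.cluster import SpectralBiclustering
    >>> X = np.random.RandomState(0).rand(30, 20) + 0.1
    >>> model = SpectralBiclustering(n_clusters=2, random_state=0).fit(X)
    >>> model.row_labels_.shape
    (30,)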
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
| bsd-3-clause |
gilestrolab/pyrem | src/pyrem/io.py | 1 | 3060 | from pyrem.polygram import Polygram
__author__ = 'quentin'
#todo edf, csv
import scipy.io as scio
import pandas as pd
import numpy as np
from pyrem.time_series import Signal, Annotation
import joblib
from pyrem.time_series import BiologicalTimeSeries, Annotation, Signal
def polygram_from_pkl(filename):
return joblib.load(filename)
def signal_from_pkl(filename):
return joblib.load(filename)
#
# def signal_from_csv(file_name, sampling_freq):
# data = pd.read_csv(file_name, engine="c", header=None, dtype=np.float32)
# return Signal(data, sampling_freq)
def _annotation_from_spike_txt(filename, doubt_chars):
"""
Hacky parser needed because spike does not export epochs of exactly 5.000 s!!!!!!!
:param filename:
:return:
"""
df = pd.read_csv(filename, skiprows=16, sep="\t", names=["t", "x1", "x2", "x3","x4", "x5","y"], header=None, na_values="nan")
ts = df["y"]
ts.index = pd.to_datetime(df["t"] * 1e9)
annotations = ts.dropna()
annotations = annotations.resample("5s", how="first")
annotations = annotations.fillna(method="ffill")
np_ord = np.vectorize(lambda x : ord(x.upper()))
annot_values = np_ord(np.array(annotations)).flatten()
annot_values = annot_values.astype(np.uint8)
annot_probas = [0 if a in doubt_chars else 1 for a in annot_values]
return Annotation(annot_values, 1/5.0, annot_probas, name="vigilance_state", type="vigilance_state")
def polygram_from_spike_matlab_file(signal_filename, annotation_filename, fs, annotation_fs, channel_names, channel_types, doubt_chars,resample_signals, metadata={}):
"""
    This function loads a matlab file exported by Spike and returns it
    as a polygram.
:param signal_filename: the matlab file name
:return: a polygram
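    example (illustrative; the file names and channel layout below are
    hypothetical):
        >>> pol = polygram_from_spike_matlab_file(
        ...         "recording.mat", "stages.txt", fs=256.0, annotation_fs=0.2,
        ...         channel_names=["EEG", "EMG"], channel_types=["eeg", "emg"],
        ...         doubt_chars=[ord("D")], resample_signals=False)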
"""
an = _annotation_from_spike_txt(annotation_filename, doubt_chars)
type_for_name = dict(zip(channel_names, channel_types))
matl = scio.loadmat(signal_filename, squeeze_me=True, struct_as_record=False)
data_channels = {}
# annotation_channels = {}
for k in matl.keys():
# exclude metadata such as "__global__", "__version__" ...
if not k.startswith("__"):
obj = matl[k]
channel_number = int(k.split("_")[-1][2:])
if "values" in dir(obj):
channel_id = channel_names[channel_number-1]
data_channels[channel_id] = obj.values
elif "text" in dir(obj):
pass
# annotation_channels["Stage_%i" % (channel_number-1)] = obj.text
del matl
crop_at = np.min([i.size for _,i in data_channels.items()])
for k,a in data_channels.items():
data_channels[k] = a[:crop_at]
signals = [Signal(data,fs, name=name, type=type_for_name[name] ) for name,data in data_channels.items()]
del data_channels
if resample_signals:
signals = [ s.resample(resample_signals) for s in signals]
#signals = [s[:an.duration]for s in signals]
signals.append(an)
return Polygram(signals)
| gpl-3.0 |
johnmcdowall/procedural_city_generation | procedural_city_generation/building_generation/getFoundation.py | 3 | 2136 | import numpy as np
from procedural_city_generation.polygons.Polygon2D import Polygon2D, Edge
def getFoundation(poly, grid_width=0.01, eps=10**-8):
rect_area = 0
rect_height = 0
rect_x = [0,0]
rect_base = None
#Iterate through edges which are bordering a road, find largest
#rectangle for each one
for base in sorted([edge for edge in poly.edges if edge.bordering_road],
key=lambda x: -x.length):
#Initialize height
height = grid_width
done = False
while not done:
cuts = []
for other in poly.edges:
#Find all intersections
if other is not base:
x = [0,0]
try:
x = np.linalg.solve(np.array(((base.dir_vector), (-other.dir_vector))).T,
other[0] - (base[0] + base.n * height))
except np.linalg.LinAlgError:
pass
if eps < x[1] < 1 - eps:
#intersection found
if x[0] < eps:
cuts.append(0)
elif x[0] > 1 - eps:
cuts.append(1)
else:
cuts.append(x[0])
if len(cuts) == 2:
#Possible rectangle found
width = abs(base.length*cuts[1] - base.length*cuts[0])
this_area = width * height
if this_area > rect_area:
rect_area = this_area
rect_height = height
rect_x = cuts
rect_base = base
height += grid_width
else:
done = True
break
if rect_height:
p1 = rect_base[0] + rect_x[1] * rect_base.dir_vector
p2 = rect_base[0] + rect_x[0] * rect_base.dir_vector
p3 = p2 + rect_height * rect_base.n
p4 = p1 + rect_height * rect_base.n
return Polygon2D([p1,p2,p3,p4])
else:
#TODO: assign issue to lenny ... why return false
return False
if __name__=="__main__":
import matplotlib.pyplot as plt
from plot_poly import plot_poly
from getBlock import getBlock
from getLots import getLots
import construct_polygons as cp
polys, vertices = cp.main()
lots = getLots(polys[:20], vertices)
for poly in lots:
if poly.poly_type == "lot":
f = getFoundation(poly)
if f:
plot_poly(poly, mode="k")
plot_poly(f, mode="g")
else:
plot_poly(poly, mode="r")
plt.show()
| mpl-2.0 |
doct-rubens/predator-prey-beetle-wasp | tests/test_basic.py | 1 | 2731 | # -*- coding: utf-8 -*-
#
# Basic testing script that creates a sample universe and world
# with a predefined set of parameters and executes a simulation batch
import numpy as np
import matplotlib.pyplot as plt
from simul.universe import Universe
from simul.world import WonderfulWorld
from simul.control import SimulationControl
from media.plotter import Plotter
# reset the random generator seed (obtain same results for
# debugging purposes)
np.random.seed(42)
# beetle universe laws:
# mfr - male/female ratio - male probability
# lm, lv - lifespan (mean and var, normal distribution)
# amin, amax - initial population age (age, min and max)
# fr - fertility ratio
# om, ov - offspring mean and variance (normal distribution)
# aa - adult age (aa, equal or higher is adult)
# ee - egg age (ea, less or equal is egg)
# rd - random death (chance)
# fly universe laws parameters
fly_params = {'mfr': 0.3, 'lm': 24, 'lv': 4, 'amin': 17, 'amax': 17,
'fr': 1.0, 'om': 28, 'ov': 19, 'aa': 15, 'ee': 9, 'rd': 0.05}
# moth universe laws parameters
moth_params = {'mfr': 0.5, 'lm': 70, 'lv': 2, 'amin': 0, 'amax': 65,
'fr': 1.0, 'om': 60, 'ov': 20, 'aa': 63, 'ee': 10, 'rd': 0.04}
# other parameters:
# pc - predation coefficient
other_params = {'pc': 10.0}
# default costs:
costs = {'fly': 0.0027, 'moth': 0.005}
# initial number of flies and moths
nf = 0
nm = 2488
# number of simulation steps and number of simulations
steps = 200
n_simuls = 1
# image generation params
title = 'test simulation'
parent_path = 'output_images'
path = 'test_simulation'
# 'living','dead','male','female','randomly_killed','old_age_killed','parents','newborn','predation','caterpillars'
columns = ['moth-living', 'fly-living', 'fly-newborn', 'moth-female', 'moth-male']
# output csv file generation params
output_csv_dir = 'outputs'
output_csv_name = 'simul_results'
output_csv = 'all' # can be 'all', 'mean' or 'none'
output_costs = 'mean' # same as above, 'all', 'mean' or 'none'
# create the classes
my_plotter = Plotter(title, path, columns, n_simuls, parent_path=parent_path)
u = Universe(*fly_params.values(), *moth_params.values(), *other_params.values())
w = WonderfulWorld(u, fil=fil, mil=mil)
s = SimulationControl(w, *costs.values(), plotter=my_plotter)
# run a simulation batch
df = s.simulation_batch(nf, nm, steps, n_simuls,
output_csv=output_csv,
output_costs=output_costs,
output_dir=output_csv_dir,
output_name=output_csv_name
)
df[['moth-living', 'fly-living']].plot()
plt.show()
| bsd-2-clause |
dstndstn/astrometry.net | sdss/dr9.py | 2 | 1887 | # This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
from __future__ import absolute_import
from .common import *
from .dr8 import *
class DR9(DR8):
def __init__(self, **kwargs):
'''
Useful kwargs:
basedir : (string) - local directory where data will be stored.
'''
DR8.__init__(self, **kwargs)
self.dasurl = 'http://data.sdss3.org/sas/dr9/boss/'
def getDRNumber(self):
return 9
def _get_runlist_filename(self):
return self._get_data_file('runList-dr9.par')
if __name__ == '__main__':
sdss = DR9()
rcfb = (2873, 3, 211, 'r')
r,c,f,b = rcfb
bandnum = band_index(b)
sdss.retrieve('psField', *rcfb)
psfield = sdss.readPsField(r,c,f)
dg = psfield.getDoubleGaussian(bandnum, normalize=True)
psf = psfield.getPsfAtPoints(bandnum, 2048/2., 1489./2.)
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
H,W = psf.shape
cx,cy = (W/2, H/2)
DX,DY = np.meshgrid(np.arange(W)-cx, np.arange(H)-cy)
(a1,s1, a2,s2) = dg
R2 = (DX**2 + DY**2)
G = (a1 / (2.*np.pi*s1**2) * np.exp(-R2/(2.*s1**2)) +
a2 / (2.*np.pi*s2**2) * np.exp(-R2/(2.*s2**2)))
print('G sum', G.sum())
print('psf sum', psf.sum())
psf /= psf.sum()
plt.clf()
plt.subplot(2,2,1)
ima = dict(interpolation='nearest', origin='lower')
plt.imshow(psf, **ima)
plt.subplot(2,2,2)
plt.imshow(G, **ima)
plt.subplot(2,2,3)
plt.plot(psf[H/2,:], 'rs-', mec='r', mfc='none')
plt.plot(G[H/2,:], 'gx-')
plt.subplot(2,2,4)
plt.semilogy(np.maximum(1e-6, psf[H/2,:]), 's-', mec='r', mfc='none')
plt.semilogy(np.maximum(1e-6, G[H/2,:]), 'gx-')
plt.savefig('psf1.png')
| bsd-3-clause |
glwagner/py2Periodic | tests/twoLayerQG/testBathymetryTwoLayerQG.py | 1 | 1572 | import time, sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../../')
from py2Periodic.physics import twoLayerQG
from numpy import pi
params = {
'f0' : 1.0e-4,
'Lx' : 1.0e6,
'beta' : 1.5e-11,
'defRadius' : 1.5e4,
'H1' : 500.0,
'H2' : 2000.0,
'U1' : 2.5e-2,
'U2' : 0.0,
'bottomDrag' : 1.0e-7,
'nx' : 128,
'ny' : 128,
'dt' : 1.0e3,
'visc' : 0.0e8,
'viscOrder' : 4.0,
'timeStepper': 'RK4',
'nThreads' : 4,
'useFilter' : True,
'flatBottom' : True,
}
# Create the two-layer model
qg = twoLayerQG.model(**params)
qg.describe_model()
# Initial condition:
Ro = 1.0e-3
f0 = 1.0e-4
q1 = Ro*f0*np.random.standard_normal(qg.physVarShape)
q2 = Ro*f0*np.random.standard_normal(qg.physVarShape)
qg.set_q1_and_q2(q1, q2)
# Bathymetry
R = qg.Lx/20
(x0, y0) = (qg.Lx/2.0, qg.Ly/2.0)
h = 0.1*qg.H2*np.exp( (-(qg.x-x0)**2.0 - (qg.y-y0)**2.0)/(2.0*R**2.0) )
qg.set_bathymetry(h)
# Run a loop
nt = 1e3
for ii in np.arange(0, 1e3):
qg.step_nSteps(nSteps=nt, dnLog=nt)
qg.update_state_variables()
fig = plt.figure('Perturbation vorticity', figsize=(8, 8)); plt.clf()
plt.subplot(221); plt.imshow(qg.q1)
plt.subplot(222); plt.imshow(qg.q2)
plt.subplot(223); plt.imshow(np.abs(qg.soln[0:qg.ny//2-1, :, 0]))
plt.subplot(224); plt.imshow(np.abs(qg.soln[0:qg.ny//2-1, :, 1]))
plt.pause(0.01), plt.draw()
print("Close the plot to end the program")
plt.show()
| mit |
nanditav/15712-TensorFlow | tensorflow/contrib/learn/__init__.py | 1 | 1719 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning with TensorFlow.
## Estimators
Train and evaluate TensorFlow models.
@@BaseEstimator
@@Estimator
@@ModeKeys
@@DNNClassifier
@@DNNRegressor
@@LinearClassifier
@@LinearRegressor
## Graph actions
Perform various training, evaluation, and inference actions on a graph.
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
## Input processing
Queue and read batched input data.
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import make_all
__all__ = make_all(__name__)
__all__.append('datasets')
| apache-2.0 |
WillemWybo/SGF_formalism | source/functionFitter.py | 1 | 43613 | """
Author: Willem Wybo
Date: 18/08/2015
Place: BBP, Geneva
"""
import numpy as np
import numpy.polynomial.polynomial as npol
import scipy.linalg as la
import scipy.optimize as op
import scipy.integrate as integ
import matplotlib.pyplot as pl
from scipy.cluster.vq import kmeans2
from scipy.cluster.vq import kmeans
import copy
import math
import warnings
import itertools
import types
class Fitter(object):
def der(self, x, arr):
dx = x[1] - x[0]
diffarr = (arr[1:] - arr[0:-1]) / dx
return diffarr, x[0:-1] + dx/2
def derder(self, x, arr):
dx = x[1] - x[0]
diffarr, _ = self.der(x, arr)
diffdiffarr = (diffarr[1:] - diffarr[0:-1]) / dx
return diffdiffarr, x[1:-1]
def zerocrossing(self, x, arr):
arr = copy.copy(arr)
inds = np.where(np.diff(np.sign(arr)))[0]
return inds, x[inds]
def find_nearest(self, array, value):
idx = (np.abs(array-value)).argmin()
return array[idx], idx
class ExpFitter(Fitter):
def sumExp(self, x, a, c, flat=True):
if flat:
return (np.exp(x[:,None]*a[:,None].T).dot(c[:,None])).flatten().real
else:
return np.exp(x[:,None]*a[:,None].T).dot(c[:,None]).real
def PronyExpFit(self, deg, x, y):
'''
Construct a sum of exponentials fit to a given time-sequence y by
using prony's method
input:
[deg]: int, number of exponentials
[x]: numpy array, sequence of regularly spaced points at which y is evaluated
[y]: numpy array, sequence
output:
[a]: numpy array, exponential coefficient
[c]: numpy array, exponentail magnitudes
[rms]: float, root mean square error of the data
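        example (illustrative):
            >>> x = np.linspace(0., 5., 200)
            >>> y = 2. * np.exp(-1.3 * x) + 0.5 * np.exp(-0.4 * x)
            >>> a, c, rms = ExpFitter().PronyExpFit(2, x, y)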
'''
# stepsize
h = x[1] - x[0]
#Build matrix
A = la.hankel(y[:-deg],y[-deg-1:])
a = -A[:,:deg]
b = A[:,deg]
#Solve it
s = la.lstsq(a,b)[0]
#Solve polynomial
p = np.flipud(np.hstack((s,1)))
u = np.roots(p)
#Only keep roots in unit circle
inds = np.where(np.logical_and((np.abs(u) < 1.), \
np.logical_not(np.logical_and(np.imag(u) == 0., np.real(u) <= 0.))))[0]
u = u[inds]
#Calc exponential factors
a = np.log(u)/h
#Build power matrix
A = np.power((np.ones((len(y),1))*u[:,None].T),np.arange(len(y))[:,None]*np.ones((1,len(inds))))
#solve it
f = la.lstsq(A,y)[0]
#calc amplitudes
c = f/np.exp(a*x[0])
#build x, approx and calc rms
approx = self.sumExp(x, a, c).real
rms = np.sqrt(((approx-y)**2).sum() / len(y))
return a, c, rms
def construct_Hankel_matrices(self, y):
ind0 = int(len(y)/2)
# original and shifted hankel matrix
H0 = la.hankel(y[0:ind0], y[ind0-1:2*ind0-1])
H1 = la.hankel(y[1:ind0+1], y[ind0:2*ind0])
return H0, H1
# def Z_McE_ExpFit(self, x, y, deg=2):
# # construct the Hankel matrices
# H0, H1 = self.construct_Hankel_matrices(y)
# # compute the singular value decomposition
# U, s, Vh = la.svd(H0)
# U_ = U[:, 0:deg]
# Vh_ = Vh[0:deg, :]
# s_ = s[0:deg]
# # compute system matrix
# F0 = np.diag(1./np.sqrt(s_)).dot(U_.T)
# F1 = Vh_.T.dot(np.diag(1./np.sqrt(s_)))
# A = F0.dot(H1.dot(F1))
# # find eigenvalues of system matrix
# u, v = la.eig(A)
# # system time-scales (inverse)
# alphas = np.log(u) / dx
# return alphas
def fitExp_Z_McE(self, x, y, rtol=1e-2, maxdeg=10):
deg = 1; rms = 1.
# stepsize
dx = x[1] - x[0]
# construct the Hankel matrices
H0, H1 = self.construct_Hankel_matrices(y)
# compute the singular value decomposition
U, s, Vh = la.svd(H0)
# loop over increasing number of exponentials
while rms > rtol and deg < maxdeg:
U_ = U[:, 0:deg]
Vh_ = Vh[0:deg, :]
s_ = s[0:deg]
# compute system matrix
F0 = np.diag(1./np.sqrt(s_)).dot(U_.T)
F1 = Vh_.T.dot(np.diag(1./np.sqrt(s_)))
A = F0.dot(H1.dot(F1))
# find eigenvalues of system matrix
u, v = la.eig(A)
# system time-scales (inverse)
alphas = np.log(u.real) / dx
# solve weights
A = np.exp(x[:,None] * alphas[None, :] * dx)
gammas = la.lstsq(A,y)[0]
# compute rmse
approx = self.sumExp(x, alphas, gammas)
rms = np.sqrt(((approx-y)**2).sum() / len(y))
# increase degree
deg += 1
return alphas, gammas, rms
def reduceSeries(self, a, c, x, y, rtol=1e-2):
'''
Reduces the number of exponential terms in a series, till a given tolerance
is reached
input:
[a]: numpy array of exponential timescales
[c]: numpy array of exponential magnitudes
[x]: numpy array of x-values at which the function is evaluated
[y]: numpy array of function values
[rtol]: float, relative tolerance given the largest function value
output:
[alpha]: exponential coefficients
[gamma]: magnitudes
[rms]: float, root mean square error
'''
k = 1; rms = 2*rtol
while rms > rtol and k <= len(a):
sortind = np.argsort(np.abs(c))[::-1]
alpha = a[sortind][0:k]
gamma = c[sortind][0:k]
approx = self.sumExp(x, alpha, gamma).real
rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
k += 1
return alpha, gamma, rms
def fitExp(self, x, y, deg=30, rtol=1e-2, surface=False, A=None):
a, c, rms = self.PronyExpFit(deg, x, y)
alpha, gamma, rms = self.reduceSeries(a, c, x, y, rtol=rtol)
if surface:
dx = x[1] - x[0]
            if A is None:
A = dx * np.sum(y)
Afit = np.sum(gamma * (np.exp(alpha*x[-1]) - np.exp(alpha*x[0])) / alpha)
gamma = gamma * A / Afit
approx = self.sumExp(x, alpha, gamma).real
rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
return alpha, gamma, rms
class fExpFitter(Fitter):
def sumFExp(self, s, alphas, gammas):
return np.sum(self.fexps(s, alphas, gammas), 0)
def fexps(self, s, alphas, gammas):
return gammas[:,None] / (alphas[:,None] + s[None,:])
def trialFunFit(self, s, arr, alphas, pairs=None):
# construct matrix for extended fitting problem
A = np.concatenate((1. / (s[:,None] + alphas[None,:]), \
arr[:,None] / (s[:,None] + alphas[None,:])), axis=1)
# find auxiliary residues
c = la.lstsq(A, arr)[0][-len(alphas):]
# find zeros of fitted auxiliary function
H = np.diag(alphas) - np.dot(np.ones((len(alphas),1), dtype=complex), c[None,:])
alphanew = np.linalg.eig(H)[0]
# find real residues
Anew = 1. / (s[:,None] + alphanew[None,:])
cnew = la.lstsq(Anew, arr)[0]
return alphanew, cnew, None
def trialFunFit_constrained(self, s, arr, alphas, pairs, zerostart=False):
deg = len(alphas)
carr = np.concatenate((arr.real, arr.imag))
# construct matrix for extended fitting problem
A = np.concatenate((1. / (s[:,None] + alphas[None,:]), \
arr[:,None] / (s[:,None] + alphas[None,:])), axis=1)
# implement the constraint
pairsnew = np.concatenate((pairs, pairs))
for i, p in enumerate(pairsnew):
if p:
x1 = A[:,i] + A[:,i+1]
x2 = 1j * (A[:,i] - A[:,i+1])
A[:,i] = x1
A[:,i+1] = x2
A = np.concatenate((A.real, A.imag), axis=0)
# find auxiliary residues
c = la.lstsq(A, carr)[0][-len(alphas):]
# find zeros of fitted auxiliary function
a = np.diag(alphas)
b = np.ones(deg)
# implement similarity transform
for i, p in enumerate(pairs):
if p:
a[i:i+2, i:i+2] = np.array([[alphas[i].real, alphas[i].imag], \
[-alphas[i].imag, alphas[i].real]])
b[i:i+2] = np.array([2,0])
H = a.real - np.dot(b[:,None], c[None,:])
alphanew = np.linalg.eig(H)[0]
inds = np.argsort(alphanew)
alphanew = alphanew[inds]
# indicates where pairs of complex conjugate poles occur
auxarr = np.abs((np.abs(alphanew[:-1]) - np.abs(alphanew[1:])) / np.abs(alphanew[:-1]))
auxarr2 = np.abs(alphas.imag) > 1e-15
pairs = np.logical_and(np.concatenate((auxarr < 1e-15, np.zeros(1, dtype=bool))), auxarr2)
# find residues
Anew = 1. / (s[:,None] + alphanew[None,:])
for i, p in enumerate(pairs):
if p:
x1 = Anew[:,i] + Anew[:,i+1]
x2 = 1j * (Anew[:,i] - Anew[:,i+1])
Anew[:,i] = x1
Anew[:,i+1] = x2
Anew = np.concatenate((Anew.real, Anew.imag), axis=0)
if zerostart:
# enforce K(t=0)=0 constraint
row1 = np.ones(2*deg)
for i, p in enumerate(pairs):
if p:
row1[i+1] = 0
Anew = np.concatenate((np.ones((1, deg), dtype=complex), Anew), axis=0)
carr = np.concatenate((np.zeros(1, dtype=complex), carr))
cnew = la.lstsq(Anew, carr)[0]
cnew = np.array(cnew, dtype=complex)
# recast cnew to complex values
for i, p in enumerate(pairs):
if p:
cnew[i:i+2] = np.array([cnew[i] + 1j * cnew[i+1], cnew[i] - 1j * cnew[i+1]])
return alphanew, cnew, pairs
def fit_residues(self, s, arr, alphas, pairs):
carr = np.concatenate((arr.real, arr.imag))
A = 1. / (s[:,None] + alphas[None,:])
for i, p in enumerate(pairs):
if p:
x1 = A[:,i] + A[:,i+1]
x2 = 1j * (A[:,i] - A[:,i+1])
A[:,i] = x1
A[:,i+1] = x2
A = np.concatenate((A.real, A.imag), axis=0)
cnew = la.lstsq(A, carr)[0]
cnew = np.array(cnew, dtype=complex)
# recast cnew to complex values
for i, p in enumerate(pairs):
if p:
cnew[i:i+2] = np.array([cnew[i] + 1j * cnew[i+1], cnew[i] - 1j * cnew[i+1]])
return cnew
def trialFunFit_constrained_2d(self, s, arr2d, alphas, pairs):
print '>>> multifun fit test v2 <<<'
deg = len(alphas)
# construct f array
arr1d = np.array([], dtype=complex)
for ind, arr in enumerate(arr2d):
arr1d = np.concatenate((arr1d, arr))
# construct matrix A
ns = len(s)
ncols = (len(arr2d) + 1) * deg
nrows = len(arr1d)
A = np.zeros((nrows, ncols), dtype=complex)
for ind, fis in enumerate(arr1d):
indA = int(ind/ns)
A[ind,deg*indA:deg*(indA+1)] = 1./(s[ind%ns] + alphas)
# try:
# A[ind,deg*indA:deg*(indA+1)] = 1./(s[ind%ns] + alphas)
# except ValueError:
# print indA
# print deg*indA
# print deg*(indA+1)
# print ncols
A[ind,-deg:] = -fis / (s[ind%ns] + alphas)
# implement the constraint
for j in range(len(arr2d) + 1):
for i, p in enumerate(pairs):
if p:
x1 = A[:,j*deg+i] + A[:,j*deg+i+1]
x2 = 1j * (A[:,j*deg+i] - A[:,j*deg+i+1])
A[:,j*deg+i] = x1
A[:,j*deg+i+1] = x2
A = np.concatenate((A.real, A.imag), axis=0)
arr1d = np.concatenate((arr1d.real, arr1d.imag))
# find auxiliary residues
c = la.lstsq(A, arr1d)[0][-len(alphas):]
print 'cnew: ', c
# find zeros of fitted auxiliary function
a = np.diag(alphas)
b = np.ones(deg)
# implement similarity transform
for i, p in enumerate(pairs):
if p:
a[i:i+2, i:i+2] = np.array([[alphas[i].real, alphas[i].imag], \
[-alphas[i].imag, alphas[i].real]])
b[i:i+2] = np.array([2,0])
# compute zeros of sum sigmafit
H = a.real - np.dot(b[:,None], c[None,:])
print 'H: ', H
alphanew = np.linalg.eig(H)[0]
print 'alphanew: ', alphanew
inds = np.argsort(alphanew)
alphanew = alphanew[inds]
# indicates where pairs of complex conjugate poles occur
auxarr = np.abs((np.abs(alphanew[:-1]) - np.abs(alphanew[1:])) / np.abs(alphanew[:-1]))
auxarr2 = np.abs(alphanew.imag) > 1e-15 # np.abs(alphas.imag) > 1e-15
pairs = np.logical_and(np.concatenate((auxarr < 1e-15, np.zeros(1, dtype=bool))), auxarr2)
# find residues
# compute matrix for residue calculation
Anew = 1. / (s[:,None] + alphanew[None,:])
for i, p in enumerate(pairs):
if p:
x1 = Anew[:,i] + Anew[:,i+1]
x2 = 1j * (Anew[:,i] - Anew[:,i+1])
Anew[:,i] = x1
Anew[:,i+1] = x2
Anew = np.concatenate((Anew.real, Anew.imag), axis=0)
# compute residues
c2dnew = np.zeros((arr2d.shape[0], deg), dtype=complex)
for ind, arr in enumerate(arr2d):
carr = np.concatenate((arr.real, arr.imag))
cnew = la.lstsq(Anew, carr)[0]
cnew = np.array(cnew, dtype=complex)
# recast cnew to complex values
for i, p in enumerate(pairs):
if p:
cnew[i:i+2] = np.array([cnew[i] + 1j * cnew[i+1], cnew[i] - 1j * cnew[i+1]])
c2dnew[ind,:] = cnew
print 'cnew: ', c2dnew
return alphanew, c2dnew, pairs
def reduceSeries(self, s, y, a, c, pairs=None, rtol=1e-2, pprint=True):
'''
reduce the series of exponentials after the fitting
'''
k = 1; rms = 1.
# ensure stability of approximation
inds = np.where(a.real > 0.)[0]
a = a[inds]; c = c[inds];
        if pairs is not None: pairs = pairs[inds]
# construct indices for ranking the exponentials
pairs_alltrue = copy.copy(pairs)
for i,p in enumerate(pairs):
if p: pairs_alltrue[i+1] = True
magnitudes = np.zeros(a.shape)
for i in range(len(pairs_alltrue)):
if pairs_alltrue[i]:
c_ = c[i].real; c__ = c[i].real
a_ = a[i].real; a__ = a[i].real
magnitudes[i] = (c_*a_ + c__*a__) / (a_**2 + a__**2)
else:
magnitudes[i] = c[i].real / a[i].real
sortind = np.argsort(np.abs(magnitudes))[::-1]
anew = copy.copy(a[sortind])
alphas = anew
cnew = copy.copy(c[sortind])
gammas = cnew
# look for pairs to be sure they are correct
auxarr = np.abs((np.abs(alphas[:-1]) - np.abs(alphas[1:])) / np.abs(alphas[:-1]))
auxarr2 = np.abs(alphas.imag) > 1e-15
pairs = np.logical_and(np.concatenate((auxarr < 1e-15, np.zeros(1, dtype=bool))), auxarr2)
npairs = copy.copy(pairs)
approx = self.sumFExp(s, alphas, gammas)
while rms > rtol and k < len(a)+1:
            if (pairs is not None) and pairs[k-1]:
k += 1
alphas = anew[0:k]
gammas = cnew[0:k]
auxarr = np.abs((np.abs(alphas[:-1]) - np.abs(alphas[1:])) / np.abs(alphas[:-1]))
auxarr2 = np.abs(alphas.imag) > 1e-15
npairs = np.logical_and(np.concatenate((auxarr < 1e-15, np.zeros(1, dtype=bool))), auxarr2)
approx = self.sumFExp(s, alphas, gammas)
rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
k += 1
if pprint:
pairinds = copy.copy(npairs)
inds = np.where(npairs)[0]
for i in inds:
pairinds[i+1] = True
inds = np.where(np.logical_not(pairinds))[0]
if len(inds) > 0:
if np.max(np.abs(alphas[inds].imag)) > 1e-6:
print '!!! Warning: invalid pairs !!!'
print 'original alphas: ', anew
print 'original gammas: ', cnew
print 'original pairs: ', pairs
print 'new alphas: ', alphas
print 'new gammas: ', gammas
print 'new pairs: ', npairs
return alphas, gammas, rms, approx, npairs
def _find_start_nodes(self, s, deg, realpoles, initpoles):
if initpoles == 'lin':
trialpoles = np.linspace(s[int(len(s)/2.)+1].imag, s[-1].imag, deg)
elif initpoles == 'log10':
trialpoles = np.logspace(1, np.log10(s[-1].imag), num=deg, base=10)
elif initpoles == 'log':
trialpoles = np.logspace(1, np.log(s[-1].imag), num=deg, base=math.e)
elif initpoles == 'random':
trialpoles = s[-1].imag * np.random.rand(deg)
else:
raise Exception('initpoles invalid')
if realpoles:
pairs = np.zeros(trialpoles.shape, dtype=bool)
else:
trialpoles = np.array([[tp + 1j*tp, tp - 1j*tp] for tp in trialpoles]).flatten()
pairs = np.array([[True, False] for _ in range(deg)]).flatten()
return trialpoles, pairs
def _run_fit(self, s, y, trialpoles, pairs, rtol, maxiter, constrained, zerostart, pole_flip=True, pprint=True):
'''
performs iterations of the actual fitting process
'''
k = 0; rms = rtol+1.
l = 0; m = 0
alist = []; clist = []; rmslist = []; pairslist = []
trialpoles_orig = copy.copy(trialpoles)
pairs_orig = copy.copy(pairs)
if constrained:
while rms > rtol and k < maxiter:
a, c, pairs = self.trialFunFit_constrained(s, y, trialpoles, pairs, zerostart=zerostart)
approx = self.sumFExp(s, a, c)
rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
# if unstable poles, make sure to run again
# if np.min(a) < 0.:
# rms = rtol + 1.
# if m < 10.:
# if pole_flip:
ind = np.where(a < 0.)[0] # where poles are unstable
if len(ind) > 0:
a[ind] *= -1.
c = self.fit_residues(s, y, a, pairs)
approx = self.sumFExp(s, a, c)
rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
# if rms < rtol:
# alist.append(copy.deepcopy(a)); clist.append(copy.deepcopy(c)); rmslist.append(rms); pairslist.append(pairs)
# else:
# ind = np.where(a > 0.)[0] # where poles are stable
# newpole, newpair = self._find_start_nodes(s, len(a)-len(ind), True, 'random')
# trialpoles = np.concatenate((a[ind], newpole))
# pairs = np.concatenate((pairs[ind], newpair))
# else:
# trialpoles, pairs = self._find_start_nodes(s, len(trialpoles_orig), True, 'random')
# m = 0
# l += 1; m += 1
# else:
alist.append(copy.deepcopy(a)); clist.append(copy.deepcopy(c)); rmslist.append(rms); pairslist.append(pairs)
trialpoles = copy.copy(a)
k += 1
if pprint and l > 5:
print 'Often found unstable poles (' + str(l) + ' times)'
return alist, clist, rmslist, pairslist
else:
while rms > rtol and k < maxiter:
a, c,_ = self.trialFunFit(s, y, trialpoles, zerostart)
approx = self.sumFExp(s, a, c)
rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
trialpoles = a
k += 1
return alist, clist, rmslist, None
def _run_fit_vector(self, s, ys, trialpoles, pairs, rtol, maxiter):
# eps = 2.
# k = 0; rms = 1.
# rms_ = rms
# alist = []; clist = []; rmslist = []; pairslist = []
# while rms > rtol and k < maxiter:
# a, c2d, pairs = self.trialFunFit_constrained_2d(s, ys, trialpoles, pairs)
# rms = 0.
# for ind, y in enumerate(ys):
# approx = self.sumFExp(s, a, c2d[ind])
# rms += np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
# alist.append(copy.deepcopy(a)); clist.append(copy.deepcopy(c2d)); rmslist.append(rms); pairslist.append(pairs)
# # randomize poles a bit
# skip = False
# tp = copy.deepcopy(a)
# if (rms_ - rms) / rms_ < eps:
# for i, p in enumerate(pairs):
# if not skip:
# if p:
# x1 = 0.1 * tp[i].real; x2 = 0.1 * np.abs(tp[i].imag)
# r1 = x1 * (2. * np.random.rand() - 1); r2 = x2 * (2. * np.random.rand() - 1)
# tp[i:i+2] = np.array([tp[i] + r1 + 1j*r2, tp[i+1] + r1 - 1j*r2])
# skip = True
# else:
# x = 0.1 * tp[i]
# r = x * (2. * np.random.rand() - 1)
# tp[i] += r
# skip = False
# trialpoles = tp
# k += 1
# rms_ = rms
eps = 2.
k = 0; rms = 1.
rms_ = rms
alist = []; clist = []; rmslist = []; pairslist = []
while rms > rtol and k < maxiter:
a2d = np.zeros((len(ys), len(trialpoles)), dtype=complex)
c2d = np.zeros((len(ys), len(trialpoles)), dtype=complex)
pairs2d = np.zeros((len(ys), len(trialpoles)), dtype=bool)
for ind, y in enumerate(ys):
a2d[ind], c2d[ind], pairs2d[ind] = self.trialFunFit_constrained(s, y, trialpoles, pairs)
# put complex conjugates with positive part first
for i, p in enumerate(pairs2d[ind]):
if p:
if a2d[ind,i] < 0:
a2d[ind,i] = a2d[ind,i].real - 1j * a2d[ind,i].imag
a2d[ind,i+1] = a2d[ind,i+1].real - 1j * a2d[ind,i+1].imag
c2d[ind,i] = c2d[ind,i].real - 1j * c2d[ind,i].imag
c2d[ind,i+1] = c2d[ind,i+1].real - 1j * c2d[ind,i+1].imag
a, pairs = self._Kmeans(a2d, pairs2d)
c2d = np.zeros((len(ys), len(a)), dtype=complex)
for ind, y in enumerate(ys):
c2d[ind] = self.fit_residues(s, y, a, pairs)
approx = self.sumFExp(s, a, c2d[ind])
rms += np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
alist.append(copy.deepcopy(a)); clist.append(copy.deepcopy(c2d)); rmslist.append(rms); pairslist.append(pairs)
# randomize poles a bit
skip = False
tp = copy.deepcopy(a)
if (rms_ - rms) / rms_ < eps:
for i, p in enumerate(pairs):
if not skip:
if p:
x1 = 0.1 * tp[i].real; x2 = 0.1 * np.abs(tp[i].imag)
r1 = x1 * (2. * np.random.rand() - 1); r2 = x2 * (2. * np.random.rand() - 1)
tp[i:i+2] = np.array([tp[i] + r1 + 1j*r2, tp[i+1] + r1 - 1j*r2])
skip = True
else:
x = 0.1 * tp[i]
r = x * (2. * np.random.rand() - 1)
tp[i] += r
skip = False
trialpoles = tp
k += 1
return alist, clist, rmslist, pairslist
def _Kmeans(self, a2d, pairs2d): # do the kmeans algorithm to make sure all nodes are the same
a1d = np.array([], dtype=complex)
for i, a in enumerate(a2d):
# determine the coefficients not to take into account in the algorithm
paux = np.concatenate((np.array([False]), pairs2d[i,:-1]))
inds = np.where(np.logical_not(paux))[0]
a1d = np.concatenate((a1d,a[inds]))
adata = np.concatenate((a1d.real[:,None], a1d.imag[:,None]), 1)
astart = np.concatenate((a2d[-1].real[inds][:,None], a2d[-1].imag[inds][:,None]), 1)
a = kmeans(adata, astart)[0]
# check for complex conjugates
anew = []; pairsnew = []
for alpha in a:
if np.abs(alpha[1]) > 1e-9:
anew.append(alpha[0] + 1j*alpha[1])
anew.append(alpha[0] - 1j*alpha[1])
pairsnew.append(True)
pairsnew.append(False)
else:
anew.append(alpha[0] + 1j*0.)
pairsnew.append(False)
# a = a[:,0] + 1j* a[:,1]
# look for pairs to be sure they are correct
# auxarr = np.abs((np.abs(a[:-1]) - np.abs(a[1:])) / np.abs(a[:-1]))
# auxarr2 = np.abs(a.imag) > 1e-15
# pairs = np.logical_and(np.concatenate((auxarr < 1e-15, np.zeros(1, dtype=bool))), auxarr2)
return np.array(anew), np.array(pairsnew)
def reduceNumExp(self, s, y, a, c, pairs, lim=0.1, pprint=True, pplot=True):
'''
pools the short timescale exponentials
'''
# find inds of exponentials that have to be taken together
inds = np.where(np.abs(a.real) > (1e3 / lim))[0]
# the other indices stay the same
inds_no = np.where(np.abs(a.real) <= (1e3 / lim))[0]
anew = a[inds_no]; cnew = c[inds_no]; pairsnew = pairs[inds_no]
if len(inds) > 1:
amin = np.min(a[inds])
EF = ExpFitter()
if pplot == True:
import matplotlib.pyplot as pl
y_f_full = self.sumFExp(s, a, c)
y_f_part = self.sumFExp(s, a[inds], c[inds])
pl.figure('reduceNumExp problem')
pl.plot(s.imag, y_f_full.real, 'r')
pl.plot(s.imag, y_f_part.real, 'b')
pl.plot(s.imag, y_f_full.imag, 'r--')
pl.plot(s.imag, y_f_part.imag, 'b--')
pl.show()
# multiple step approach
t = np.linspace(0., 5./amin.real, 1000.)
y_t = EF.sumExp(t, -a[inds], c[inds])
y_t_full = EF.sumExp(t, -a, c)
A_t = -np.sum(c[inds] * (np.exp(-a[inds]*t[-1]) - np.exp(-a[inds]*t[0])) / a[inds])
y_t_lim = EF.sumExp(np.array([lim*1e-3]), -a[inds], c[inds])
y_t_lim_full = EF.sumExp(np.array([lim*1e-3]), -a, c)
#~ print 'full sum at 1ms: ', y_t_lim_full
#~ print 'partial sum at 1ms: ', y_t_lim
#~ print 'max full sum: ', np.max(y_t_full[1:])
# fit first outside of first timestep if necessary
if amin.real < (2e4 / lim) and np.abs(y_t_lim_full - y_t_lim) > 0.001 * np.max(y_t_full[1:]):
t_out = np.linspace(lim*1e-3, 5./amin.real, 1000.)
y_out = EF.sumExp(t_out, -a[inds], c[inds])
A_out = -np.sum(c[inds] * (np.exp(-a[inds]*t_out[-1]) - np.exp(-a[inds]*t_out[0])) / a[inds])
try:
# if the maximum of the to be grouped exponentials is past lim,
# we use two exponentials, otherwise one
#~ else:
A, C, _ = EF.fitExp(t_out, y_out, deg=1, rtol=0.0001, surface=True, A=A_out)
A = -A.real; C = C.real
Ptemp = [False]
except ValueError:
A = np.array([amin.real])
C = A_out / ((np.exp(-A*t_out[-1]) - np.exp(-A*t_out[0])) / A)
Ptemp = [False]
# check if we need to fit inside first timestep
t_in = np.linspace(0., lim*1e-3, 100)
y_in = EF.sumExp(t_in, -a[inds], c[inds]) - EF.sumExp(t_in, -A, C)
A_in_full = -np.sum(c[inds] * (np.exp(-a[inds]*t_in[-1]) - np.exp(-a[inds]*t_in[0])) / a[inds])
A_in = -np.sum(C * (np.exp(-A*t_in[-1]) - np.exp(-A*t_in[0])) / A)
if np.abs(A_in - A_in_full) < 0.01 * np.abs(A_in_full):
# we don't need to fit an extra exponential,
# but just rescale surfaces a bit
A_tot = np.sum(c[inds] / a[inds])
A_part = np.sum(C / A)
C = C * A_tot / A_part
P = np.array(Ptemp, dtype=bool)
else:
# we need to fit an extra exponential
t = np.linspace(0., 3./amin.real, 1000.)
A_t = np.sum(c[inds] / a[inds])
A_exp1 = np.sum(C / A)
A2 = np.array([1e4 / lim], dtype=complex)
C2 = (A_t-A_exp1) * A2
P = np.array(Ptemp + [False], dtype=bool)
A = np.concatenate((A, A2))
C = np.concatenate((C, C2))
else:
# we can just fit inside the first timestep
# construct new exponential naively
A = np.array([amin.real], dtype=complex)
C = np.sum(c[inds] / a[inds]) * A
P = np.array([False], dtype=bool)
# concatenate the arrays
anew = np.concatenate((anew, A))
cnew = np.concatenate((cnew, C))
pairsnew = np.concatenate((pairsnew, P))
if pprint or pplot:
t = np.linspace(0.,0.050,100000)
A_original = - np.sum(c * (np.exp(-a*t[-1]) - np.exp(-a*t[0])) / a)
A_new = - np.sum(cnew * (np.exp(-anew*t[-1]) - np.exp(-anew*t[0])) / anew)
if np.abs(A_original - A_new) > 1e-12 or np.isnan(A_new.real):
print '!!! Warning: surfaces under kernels not equal !!!'
print 'oringal surface: ', A_original
print 'new surface: ', A_new
print 'all a\'s: ', a
print 'all gamma\'s: ', c
print 'all pairs: ', pairs
print 'tbg a\'s: ', a[inds]
print 'tbg gamma\'s: ', c[inds]
print 'tbg pairs: ', pairs[inds]
print 'ntbg a\'s: ', a[inds_no]
print 'ntbg gamma\'s: ', c[inds_no]
print 'ntbg pairs: ', pairs[inds_no]
print 'new a\'s: ', anew
print 'new c\'s: ', cnew
print 'new pairss: ', pairsnew
if pplot and (np.abs(A_original - A_new) > 1e-12 or np.isnan(A_new.real)):
#~ if pplot:
t = np.linspace(0.,0.050,100000)
dt = t[1] - t[0]
ef = ExpFitter()
se_ = ef.sumExp(t, -a[inds], c[inds])
e_ = ef.sumExp(t, -A, C)
se = ef.sumExp(t, -a, c)
e = ef.sumExp(t, -anew, cnew)
print 'integral original reduced: ', - np.sum(c[inds] * (np.exp(-a[inds]*t[-1]) - np.exp(-a[inds]*t[0])) / a[inds])
print 'integral fit reduced: ', - np.sum(C * (np.exp(-A*t[-1]) - np.exp(-A*t[0])) / A)
print 'final a\'s :', anew
print 'new a\'s :', A
import matplotlib.pyplot as pl
pl.figure('reduce_exp problem')
pl.plot(t*1000, se, 'r', label='original kernel')
pl.plot(t*1000, e, 'b', label='new kernel')
pl.plot(t*1000, se_, 'r--', label='exps to be reduced')
pl.plot(t*1000, e_, 'b--', label='to be reduced exp')
pl.legend(loc=0)
pl.show()
# new approximation and rmse
approx = self.sumFExp(s, anew, cnew)
rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
return anew, cnew, rms, approx, pairsnew
def fitFExp_increment(self, s, y, rtol=1e-2, maxiter=20, maxiter_step=3, realpoles=True, constrained=True, zerostart=False, pprint=True):
# find the start nodes
trialpoles, pairs = self._find_start_nodes(s, 1, realpoles, 'log10')
for k in range(maxiter):
alist, clist, rmslist, pairslist \
= self._run_fit(s, copy.copy(y), trialpoles, pairs, rtol, maxiter_step,
constrained, zerostart, pole_flip=True, pprint=pprint)
indmin = np.argmin(np.array(rmslist))
alpha = alist[indmin]; gamma = clist[indmin]; rms = rmslist[indmin]; pairs = pairslist[indmin]
if rms < rtol:
break
else:
if realpoles:
alphanew = [s[-1].imag * np.random.rand()]
pairsnew = [False]
else:
areal = s[-1].imag * np.random.rand()
aimag = s[-1].imag * np.random.rand()
alphanew = [areal + 1j*aimag, areal - 1j*aimag]
pairsnew = [True, False]
trialpoles = np.array(alpha.tolist() + alphanew)
# print trialpoles
pairs = np.array(pairs.tolist() + pairsnew)
rmsfinal = rms
if pprint and rmsfinal > rtol:
print 'Target accuracy was not reached'
return alpha, gamma, pairs, rmsfinal
def fitFExp(self, s, y, deg=20, rtol=1e-2, maxiter=5, lim=None, realpoles=True, initpoles='lin',
zerostart=False, constrained=True, reduce_numexp=False):
'''
Fits a function in fourrierspace by a series of fourrier transformed exponentials.
input:
-args
[s]: numpy array of frequencies (imaginary) at which value function is evaluated
[y]: numpy array of complex function values
-kwargs
[deg]: int, number of exponential terms used (real number is dubbeled if realpoles=False)
[rtol]: float, relative toleranse after which iterations stop
[maxiter]: int, maximum number of iterations
[lim]: float, smallest timescale to take into account [ms], if not None, the algorithm
fits the slowest timescale first, then the next slowest, etc. !!Use only for
decaying transfer functions!!
[realpoles]: boolean, use real starting poles if true, use complex conjugate poles if
false
[initpoles]: 'lin' for linearly spaced initial poles, 'log10' and 'log' for
logarithmically spaced poles
[zerostart]: boolean, constrain the function to be 0 at t=0 if true
[constrained]: fix the poles to be complex conjugate pairs
[reduce_numexp]: boolean, pool short time scale exponentials together if true
output:
[alpha]: numpy array of (complex) timescales of exponentials
[gamma]: numpy array of complex magnitudes of exponentials
[pairs]: boolean array that indicates True at every index where a complex
conjugate pair occurs
[rms]: float, root mean square error
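        example (illustrative sketch; the test kernel below is hypothetical):
            >>> s = 1j * np.linspace(1., 1000., 500)
            >>> y = 2. / (s + 50.) + 1. / (s + 400.)
            >>> alpha, gamma, pairs, rms = fExpFitter().fitFExp(s, y, deg=4)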
'''
trialpoles, pairs = self._find_start_nodes(s, deg, realpoles, initpoles)
        if lim is not None:
a_s = []; c_s = []; pair_s = []
y_decr = copy.copy(y)
deg_decr = deg
keep_going = True
count = 0
while keep_going:
alist, clist, rmslist, pairslist = self._run_fit(s, y_decr, trialpoles, pairs, rtol, maxiter, constrained, zerostart)
indmin = np.argmin(np.array(rmslist))
anew, cnew, rmsnew, approx, pairsnew = self.reduceSeries(s, y_decr, alist[indmin], clist[indmin], pairs=pairslist[indmin], rtol=rtol)
if count == 0:
# save parameters for later purposes
asave = copy.copy(anew)
csave = copy.copy(cnew)
rmssave = rmsnew
pairssave = pairsnew
surface_original = np.sum(cnew / anew)
ind = []
# take the longest timescale out
ind.append(np.argmin(anew.real))
if pairsnew[ind]:
ind.append(ind[0]+1)
a_tba = anew[ind]
c_tba = cnew[ind]
pair_tba = pairsnew[ind]
surface = np.sum(cnew / anew)
y_old = copy.copy(y_decr)
y_decr = self.sumFExp(s, anew, cnew) - self.sumFExp(s, anew[ind], cnew[ind])
#~ deg_decr -= len(ind)
trialpoles, pairs = self._find_start_nodes(s, deg_decr, True, initpoles)
# stop if timescale is small enough
if anew[ind][0] > 1e3 / (0.2*lim):
if len(ind) == 1:
c_tba = surface * a_tba
elif len(ind) == 2:
c_tba[0] = surface * (a_tba[0].real**2 + a_tba[0].imag**2) / (2.*a_tba[0].real)
c_tba[1] = c_tba[0]
else:
raise ValueError('invalid array length')
keep_going = False
# stop if rmse is small enough
approx = self.sumFExp(s, a_tba, c_tba)
rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
if rms < rtol:
keep_going = False
# append new parameters to lists
a_s += a_tba.tolist(); c_s += c_tba.tolist(); pair_s += pair_tba.tolist()
# stop if to many parameters
if count >= 9.:
keep_going = False
count +=1
# for returning
alpha = np.array(a_s, dtype=complex); gamma = np.array(c_s, dtype=complex); pairs = np.array(pair_s, dtype=bool)
approx = self.sumFExp(s, alpha, gamma)
rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
# check whether it was better to go with the first parameters
if len(asave) < len(alpha) and rmssave < rtol:
alpha = asave; gamma = csave; pairs = pairssave
approx = self.sumFExp(s, alpha, gamma)
rms = np.sqrt(((np.abs(approx-y) / np.max(np.abs(y)))**2).sum() / len(y))
surface_after = np.sum(gamma / alpha)
if np.abs(surface_original - surface_after) > rtol * surface_original:
print 'surface original: ', surface_original
print 'surface after: ', surface_after
if np.min(alpha.real) < 0.:
print '!!!owowow!!!'
else:
alist, clist, rmslist, pairslist = self._run_fit(s, y, trialpoles, pairs, rtol, maxiter, constrained, zerostart)
indmin = np.argmin(np.array(rmslist))
alpha, gamma, rms, approx, pairs = self.reduceSeries(s, y, alist[indmin], clist[indmin], pairs=pairslist[indmin], rtol=rtol)
if reduce_numexp:
alpha, gamma, rms, approx, pairs = self.reduceNumExp(s, y ,alpha, gamma, pairs, pplot=False)
return alpha, gamma, pairs, rms
def fitFExp_vector(self, s, ys, deg=20, rtol=1e-2, maxiter=5, extra_startpoles=[], extra_startpoles_pairs=[],
realpoles=True, initpoles='lin', reduce_series=False):
'''
Fit multiple data-arrays in Fourrier-domain simultaneously with a shared set of nodes
input:
[s]: numpy array of complex number, frequencies of data
[ys]: numpy ndarray of complex numbers, rows are different data-arrays
[deg]: int, the starting number of nodes
[rtol]: float, the relative tolercance at which to stop
[maxiter]: int, the maximal number of iterations after which to stop when rtol
is not reached
[extra_startpoles]: numpy array of complex number, additional initial poles
[extra_startpoles_pairs]: numpy bolean array, indicates complex conjugate pairs
associated with the extra initial poles
[realpoles]: boolean, if True the starting poles are real, if false the starting
poles are complex conjugates (and then the real degree is 2*deg)
[initpoles]: string specifying how the initial poles are distributed, choices are
'lin', 'log' and 'log10'
[reduce_series]: boolean, whether to delete expontentials of small influence after
the fitting
output:
[alpha]: complex numpy array of exponential coefficients
[gamma]: 2d complex numpy array, each row contains the residues corresponding to
the respective data arrays
[pairs]: boolean numpy array, indicates where a pair of complex conjugate
exponentials occurs
[rms]: float, aggregated root mean square error
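        example (illustrative sketch):
            >>> s = 1j * np.linspace(1., 1000., 400)
            >>> ys = np.array([1. / (s + 30.), 2. / (s + 30.) + 1. / (s + 300.)])
            >>> alphas, gammas, pairs, rms = fExpFitter().fitFExp_vector(s, ys, deg=4)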
'''
trialpoles, pairs = self._find_start_nodes(s, deg, realpoles, initpoles)
if len(extra_startpoles) > 0:
trialpoles = np.concatenate((trialpoles, extra_startpoles))
pairs = np.concatenate((pairs, extra_startpoles_pairs))
alist, clist, rmslist, pairslist = self._run_fit_vector(s, ys, trialpoles, pairs, rtol, maxiter)
indmin = np.argmin(np.array(rmslist))
if reduce_series:
# reduce the number of exponentials for each function separately
alpha_arr = np.array([], dtype=complex)
rms = 0.
for ind, c in enumerate(clist[indmin]):
alpha, gamma, rms_ind, approx, pair = self.reduceSeries(s, ys[ind], alist[indmin], c, pairs=pairslist[indmin], rtol=rtol)
rms += rms_ind
alpha_arr = np.concatenate((alpha_arr, alpha))
alpha_arr = np.unique(alpha_arr)
# search positions of common alphas
asortind = np.argsort(alist[indmin])
alphapos = np.searchsorted(alist[indmin][asortind], alpha_arr)
inds = asortind[alphapos]
return alist[indmin][inds], clist[indmin][:,inds], pairslist[indmin][inds], rms
else:
return alist[indmin], clist[indmin], pairslist[indmin], rmslist[indmin]
| mit |
mohseniaref/PySAR-1 | pysar/plot.py | 1 | 6588 | #! /usr/bin/env python
############################################################
# Program is part of PySAR v1.0 #
# Copyright(c) 2013, Heresh Fattahi #
# Author: Heresh Fattahi #
############################################################
#from matplotlib import colors
import getopt
import numpy as np
import h5py
import _readfile as readfile
import _pysar_utilities as ut
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import sys
import os
from matplotlib.colors import LinearSegmentedColormap
def Usage():
print '''
plotting the geocoded PySAR product
plot.py -f velocity.h5 -d dem -m min -M max -x subset -y subst -o outName -i inverse colormap display (yes or no) -c colomap
-x : 'xmin:xmax'
-y : 'ymin:ymax'
-c : all colormaps in matplotlib is supported (see http://matplotlib.org/examples/pylab_examples/show_colormaps.html)
Examples:
plot.py -f geo_velocity.h5 -d Sonoran.dem -m -0.01 -M 0.01 -i yes -o plotVelocity.png -c pysar_hsv
'''
def main(argv):
color_map='jet'
disp_opposite='no'
try:
opts, args = getopt.getopt(argv,"h:f:d:o:x:y:m:M:i:c:")
except getopt.GetoptError:
Usage() ; sys.exit(1)
if opts==[]:
Usage() ; sys.exit(1)
for opt,arg in opts:
if opt in ("-h","--help"):
Usage()
sys.exit()
elif opt == '-f':
File = arg
elif opt == '-d':
demFile=arg
elif opt=='-m':
Vmin=float(arg)
elif opt=='-M':
Vmax=float(arg)
elif opt == '-x':
winx=arg.split(':')
elif opt == '-y':
winy = arg.split(':')
elif opt == '-o':
outName = arg
elif opt == '-i':
disp_opposite = arg
elif opt == '-c':
color_map=arg
h5file=h5py.File(File,'r')
k=h5file.keys()
print k[0]
# ccmap=plt.get_cmap(color_map)
################################################
cdict1 = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.6, 1.0, 1.0),
(0.8, 1.0, 1.0),
(1.0, 0.5, 0.5)),
'green': ((0.0, 0.0, 0.0),
(0.2, 0.0, 0.0),
(0.4, 1.0, 1.0),
(0.6, 1.0, 1.0),
(0.8, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.5, .5),
(0.2, 1.0, 1.0),
(0.4, 1.0, 1.0),
(0.5, 0.0, 0.0),
(1.0, 0.0, 0.0),)
}
if color_map =='pysar_hsv':
ccmap = LinearSegmentedColormap('BlueRed1', cdict1)
else:
ccmap=plt.get_cmap(color_map)
print 'colormap is : '+ color_map
################################################
dset = h5file[k[0]].get(k[0])
data=dset[0:dset.shape[0],0:dset.shape[1]]
if disp_opposite in('yes','Yes','Y','y','YES'):
data=-1*data
try:
xref=h5file[k[0]].attrs['ref_x']
yref=h5file[k[0]].attrs['ref_y']
except:
print 'No reference point'
try:
ullon=float(h5file[k[0]].attrs['X_FIRST'])
ullat=float(h5file[k[0]].attrs['Y_FIRST'])
lon_step=float(h5file[k[0]].attrs['X_STEP'])
lat_step=float(h5file[k[0]].attrs['Y_STEP'])
lon_unit=h5file[k[0]].attrs['X_UNIT']
lat_unit=h5file[k[0]].attrs['Y_UNIT']
llcrnrlon=ullon
llcrnrlat=ullat+lat_step*data.shape[0]
urcrnrlon=ullon+lon_step*data.shape[1]
urcrnrlat=ullat
geocoord='yes'
print 'Input file is Geocoded'
except:
geocoord='no'
fig = plt.figure()
ax = fig.add_axes([0.1,0.1,0.8,0.8])
m = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat, urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,
resolution='l', area_thresh=1., projection='cyl',suppress_ticks=False,ax=ax)
print demFile
if os.path.basename(demFile).split('.')[1]=='hgt':
amp,dem,demRsc = readfile.read_float32(demFile)
elif os.path.basename(demFile).split('.')[1]=='dem':
dem,demRsc = readfile.read_dem(demFile)
#################################################################
try:
winx
wx=[int(i) for i in winx]
dem=dem[:,wx[0]:wx[1]]
data=data[:,wx[0]:wx[1]]
ullon=float(h5file[k[0]].attrs['X_FIRST'])+wx[0]*lon_step  # shift the corner by the subset offset, converted to degrees
llcrnrlon=ullon
urcrnrlon=ullon+lon_step*data.shape[1]
except:
print ''
try:
winy
wy=[int(i) for i in winy]
dem=dem[wy[0]:wy[1],:]
data=data[wy[0]:wy[1],:]
except:
print ''
################################################################
fig = plt.figure()
ax = fig.add_axes([0.1,0.1,0.8,0.8])
m = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat, urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat,
resolution='l', area_thresh=1., projection='cyl',suppress_ticks=False,ax=ax)
cmap_dem=plt.get_cmap('gray')
m.imshow(ut.hillshade(np.flipud(dem),50.0),cmap=cmap_dem)
try:
im=m.imshow(np.flipud(data),vmin=Vmin,vmax=Vmax,cmap=ccmap)
# cb = m.colorbar(im,"right", size="5%", pad='2%')
except:
im=m.imshow(np.flipud(data))
# cb = m.colorbar(im,"right", size="5%", pad='2%')
# m.bluemarble()
# cb = m.colorbar(im,"right", size="5%", pad='2%')
# parallels = np.arange(31.,34,0.5)
# m.drawparallels(parallels,labels=[1,0,0,1],linewidth=0.0)
# meridians = np.arange(-115.,-112.,0.5)
# m.drawmeridians(meridians,labels=[1,0,0,1],linewidth=0.0)
# m.drawmapscale()
# m = Basemap(llcrnrlon=-110.,llcrnrlat=0.,urcrnrlon=-20.,urcrnrlat=57.,
# projection='lcc',lat_1=20.,lat_2=40.,lon_0=-60.,
# resolution ='l',area_thresh=1000.)
# m.drawcoastlines()
# m.drawcountries()
# m.drawmapboundary(fill_color='#99ffff')
# m.fillcontinents(color='#cc9966',lake_color='#99ffff')
# m.drawparallels(np.arange(10,70,20),labels=[1,1,0,0])
# m.drawmeridians(np.arange(-100,0,20),labels=[0,0,0,1])
# plt.title('Atlantic Hurricane Tracks (Storms Reaching Category 4, 1851-2004)')
try:
figName = outName
except:
outName=os.path.basename(File).replace('.h5','')
figName = outName + '.png'
plt.savefig(figName,pad_inches=0.0)
# plt.show()
h5file.close()
if __name__ == '__main__':
main(sys.argv[1:])
| mit |
dmigo/incubator-superset | superset/db_engine_specs.py | 1 | 56036 | # -*- coding: utf-8 -*-
# pylint: disable=C,R,W
"""Compatibility layer for different database engines
This module stores logic specific to different database engines. Things
like time-related functions that are similar but not identical, or
information on whether to expose certain features and how to expose them.
For instance, Hive/Presto support partitions and have a specific API to
list partitions. Other databases like Vertica also support partitions but
have a different API to get to them. Other databases don't support partitions
at all. The classes here will use a common interface to specify all this.
The general idea is to use static classes and an inheritance scheme.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, namedtuple
import inspect
import logging
import os
import re
import textwrap
import time
import boto3
from flask import g
from flask_babel import lazy_gettext as _
import pandas
import sqlalchemy as sqla
from sqlalchemy import select
from sqlalchemy.engine import create_engine
from sqlalchemy.engine.url import make_url
from sqlalchemy.sql import text
from sqlalchemy.sql.expression import TextAsFrom
import sqlparse
from tableschema import Table
from werkzeug.utils import secure_filename
from superset import app, cache_util, conf, db, utils
from superset.exceptions import SupersetTemplateException
from superset.utils import QueryStatus
config = app.config
tracking_url_trans = conf.get('TRACKING_URL_TRANSFORMER')
hive_poll_interval = conf.get('HIVE_POLL_INTERVAL')
Grain = namedtuple('Grain', 'name label function duration')
class LimitMethod(object):
"""Enum the ways that limits can be applied"""
FETCH_MANY = 'fetch_many'
WRAP_SQL = 'wrap_sql'
FORCE_LIMIT = 'force_limit'
class BaseEngineSpec(object):
"""Abstract class for database engine specific configurations"""
engine = 'base' # str as defined in sqlalchemy.engine.engine
cursor_execute_kwargs = {}
time_grains = tuple()
time_groupby_inline = False
limit_method = LimitMethod.FORCE_LIMIT
time_secondary_columns = False
inner_joins = True
@classmethod
def fetch_data(cls, cursor, limit):
if cls.limit_method == LimitMethod.FETCH_MANY:
return cursor.fetchmany(limit)
return cursor.fetchall()
@classmethod
def epoch_to_dttm(cls):
raise NotImplementedError()
@classmethod
def epoch_ms_to_dttm(cls):
return cls.epoch_to_dttm().replace('{col}', '({col}/1000.0)')
@classmethod
def extra_table_metadata(cls, database, table_name, schema_name):
"""Returns engine-specific table metadata"""
return {}
@classmethod
def apply_limit_to_sql(cls, sql, limit, database):
"""Alters the SQL statement to apply a LIMIT clause"""
if cls.limit_method == LimitMethod.WRAP_SQL:
sql = sql.strip('\t\n ;')
qry = (
select('*')
.select_from(
TextAsFrom(text(sql), ['*']).alias('inner_qry'),
)
.limit(limit)
)
return database.compile_sqla_query(qry)
elif cls.limit_method == LimitMethod.FORCE_LIMIT:
sql_without_limit = cls.get_query_without_limit(sql)
return '{sql_without_limit} LIMIT {limit}'.format(**locals())
return sql
@classmethod
def get_limit_from_sql(cls, sql):
limit_pattern = re.compile(r"""
(?ix) # case insensitive, verbose
\s+ # whitespace
LIMIT\s+(\d+) # LIMIT $ROWS
;? # optional semi-colon
(\s|;)*$ # remove trailing spaces tabs or semicolons
""")
matches = limit_pattern.findall(sql)
if matches:
return int(matches[0][0])
@classmethod
def get_query_without_limit(cls, sql):
return re.sub(r"""
(?ix) # case insensitive, verbose
\s+ # whitespace
LIMIT\s+\d+ # LIMIT $ROWS
;? # optional semi-colon
(\s|;)*$ # remove trailing spaces tabs or semicolons
""", '', sql)
@staticmethod
def csv_to_df(**kwargs):
kwargs['filepath_or_buffer'] = \
config['UPLOAD_FOLDER'] + kwargs['filepath_or_buffer']
kwargs['encoding'] = 'utf-8'
kwargs['iterator'] = True
chunks = pandas.read_csv(**kwargs)
df = pandas.DataFrame()
df = pandas.concat(chunk for chunk in chunks)
return df
@staticmethod
def df_to_db(df, table, **kwargs):
df.to_sql(**kwargs)
table.user_id = g.user.id
table.schema = kwargs['schema']
table.fetch_metadata()
db.session.add(table)
db.session.commit()
@staticmethod
def create_table_from_csv(form, table):
def _allowed_file(filename):
# Only allow specific file extensions as specified in the config
extension = os.path.splitext(filename)[1]
return extension and extension[1:] in config['ALLOWED_EXTENSIONS']
filename = secure_filename(form.csv_file.data.filename)
if not _allowed_file(filename):
raise Exception('Invalid file type selected')
kwargs = {
'filepath_or_buffer': filename,
'sep': form.sep.data,
'header': form.header.data if form.header.data else 0,
'index_col': form.index_col.data,
'mangle_dupe_cols': form.mangle_dupe_cols.data,
'skipinitialspace': form.skipinitialspace.data,
'skiprows': form.skiprows.data,
'nrows': form.nrows.data,
'skip_blank_lines': form.skip_blank_lines.data,
'parse_dates': form.parse_dates.data,
'infer_datetime_format': form.infer_datetime_format.data,
'chunksize': 10000,
}
df = BaseEngineSpec.csv_to_df(**kwargs)
df_to_db_kwargs = {
'table': table,
'df': df,
'name': form.name.data,
'con': create_engine(form.con.data.sqlalchemy_uri_decrypted, echo=False),
'schema': form.schema.data,
'if_exists': form.if_exists.data,
'index': form.index.data,
'index_label': form.index_label.data,
'chunksize': 10000,
}
BaseEngineSpec.df_to_db(**df_to_db_kwargs)
@classmethod
def convert_dttm(cls, target_type, dttm):
return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
@classmethod
@cache_util.memoized_func(
timeout=600,
key=lambda *args, **kwargs: 'db:{}:{}'.format(args[0].id, args[1]))
def fetch_result_sets(cls, db, datasource_type, force=False):
"""Returns the dictionary {schema : [result_set_name]}.
Datasource_type can be 'table' or 'view'.
Empty schema corresponds to the list of full names of the all
tables or views: <schema>.<result_set_name>.
"""
schemas = db.inspector.get_schema_names()
result_sets = {}
all_result_sets = []
for schema in schemas:
if datasource_type == 'table':
result_sets[schema] = sorted(
db.inspector.get_table_names(schema))
elif datasource_type == 'view':
result_sets[schema] = sorted(
db.inspector.get_view_names(schema))
all_result_sets += [
'{}.{}'.format(schema, t) for t in result_sets[schema]]
if all_result_sets:
result_sets[''] = all_result_sets
return result_sets
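# Hedged sketch (editorial) of the dictionary shape produced above, assuming a
# database with a single schema `main` holding tables `a` and `b`:
#   {'main': ['a', 'b'], '': ['main.a', 'main.b']}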
@classmethod
def handle_cursor(cls, cursor, query, session):
"""Handle a live cursor between the execute and fetchall calls
The flow works without this method doing anything, but it allows
for handling the cursor and updating progress information in the
query object"""
pass
@classmethod
def extract_error_message(cls, e):
"""Extract error message for queries"""
return utils.error_msg_from_exception(e)
@classmethod
def adjust_database_uri(cls, uri, selected_schema):
"""Based on a URI and selected schema, return a new URI
The URI here represents the URI as entered when saving the database,
``selected_schema`` is the schema currently active presumably in
the SQL Lab dropdown. Based on that, for some database engine,
we can return a new altered URI that connects straight to the
active schema, meaning the users won't have to prefix the object
names by the schema name.
Some databases engines have 2 level of namespacing: database and
schema (postgres, oracle, mssql, ...)
For those it's probably better to not alter the database
component of the URI with the schema name, it won't work.
Some database drivers like presto accept '{catalog}/{schema}' in
the database component of the URL, that can be handled here.
"""
return uri
@classmethod
def patch(cls):
pass
@classmethod
def get_schema_names(cls, inspector):
return inspector.get_schema_names()
@classmethod
def get_table_names(cls, schema, inspector):
return sorted(inspector.get_table_names(schema))
@classmethod
def where_latest_partition(
cls, table_name, schema, database, qry, columns=None):
return False
@classmethod
def select_star(cls, my_db, table_name, engine, schema=None, limit=100,
show_cols=False, indent=True, latest_partition=True,
cols=None):
fields = '*'
cols = cols or []
if (show_cols or latest_partition) and not cols:
cols = my_db.get_columns(table_name, schema)
if show_cols:
fields = [sqla.column(c.get('name')) for c in cols]
full_table_name = table_name
quote = engine.dialect.identifier_preparer.quote
if schema:
full_table_name = quote(schema) + '.' + quote(table_name)
else:
full_table_name = quote(table_name)
qry = select(fields).select_from(text(full_table_name))
if limit:
qry = qry.limit(limit)
if latest_partition:
partition_query = cls.where_latest_partition(
table_name, schema, my_db, qry, columns=cols)
if partition_query != False: # noqa
qry = partition_query
sql = my_db.compile_sqla_query(qry)
if indent:
sql = sqlparse.format(sql, reindent=True)
return sql
@classmethod
def modify_url_for_impersonation(cls, url, impersonate_user, username):
"""
Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
"""
if impersonate_user is not None and username is not None:
url.username = username
@classmethod
def get_configuration_for_impersonation(cls, uri, impersonate_user, username):
"""
Return a configuration dictionary that can be merged with other configs
that can set the correct properties for impersonating users
:param uri: URI string
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
:return: Dictionary with configs required for impersonation
"""
return {}
@classmethod
def get_normalized_column_names(cls, cursor_description):
columns = cursor_description if cursor_description else []
return [cls.normalize_column_name(col[0]) for col in columns]
@staticmethod
def normalize_column_name(column_name):
return column_name
class PostgresBaseEngineSpec(BaseEngineSpec):
""" Abstract class for Postgres 'like' databases """
engine = ''
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('second', _('second'),
"DATE_TRUNC('second', {col}) AT TIME ZONE 'UTC'", 'PT1S'),
Grain('minute', _('minute'),
"DATE_TRUNC('minute', {col}) AT TIME ZONE 'UTC'", 'PT1M'),
Grain('hour', _('hour'),
"DATE_TRUNC('hour', {col}) AT TIME ZONE 'UTC'", 'PT1H'),
Grain('day', _('day'),
"DATE_TRUNC('day', {col}) AT TIME ZONE 'UTC'", 'P1D'),
Grain('week', _('week'),
"DATE_TRUNC('week', {col}) AT TIME ZONE 'UTC'", 'P1W'),
Grain('month', _('month'),
"DATE_TRUNC('month', {col}) AT TIME ZONE 'UTC'", 'P1M'),
Grain('quarter', _('quarter'),
"DATE_TRUNC('quarter', {col}) AT TIME ZONE 'UTC'", 'P0.25Y'),
Grain('year', _('year'),
"DATE_TRUNC('year', {col}) AT TIME ZONE 'UTC'", 'P1Y'),
)
@classmethod
def fetch_data(cls, cursor, limit):
if not cursor.description:
return []
if cls.limit_method == LimitMethod.FETCH_MANY:
return cursor.fetchmany(limit)
return cursor.fetchall()
@classmethod
def epoch_to_dttm(cls):
return "(timestamp 'epoch' + {col} * interval '1 second')"
@classmethod
def convert_dttm(cls, target_type, dttm):
return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
class PostgresEngineSpec(PostgresBaseEngineSpec):
engine = 'postgresql'
@classmethod
def get_table_names(cls, schema, inspector):
"""Need to consider foreign tables for PostgreSQL"""
tables = inspector.get_table_names(schema)
tables.extend(inspector.get_foreign_table_names(schema))
return sorted(tables)
class SnowflakeEngineSpec(PostgresBaseEngineSpec):
engine = 'snowflake'
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('second', _('second'), "DATE_TRUNC('SECOND', {col})", 'PT1S'),
Grain('minute', _('minute'), "DATE_TRUNC('MINUTE', {col})", 'PT1M'),
Grain('hour', _('hour'), "DATE_TRUNC('HOUR', {col})", 'PT1H'),
Grain('day', _('day'), "DATE_TRUNC('DAY', {col})", 'P1D'),
Grain('week', _('week'), "DATE_TRUNC('WEEK', {col})", 'P1W'),
Grain('month', _('month'), "DATE_TRUNC('MONTH', {col})", 'P1M'),
Grain('quarter', _('quarter'), "DATE_TRUNC('QUARTER', {col})", 'P0.25Y'),
Grain('year', _('year'), "DATE_TRUNC('YEAR', {col})", 'P1Y'),
)
@staticmethod
def normalize_column_name(column_name):
return column_name.lower()
class VerticaEngineSpec(PostgresBaseEngineSpec):
engine = 'vertica'
class RedshiftEngineSpec(PostgresBaseEngineSpec):
engine = 'redshift'
class OracleEngineSpec(PostgresBaseEngineSpec):
engine = 'oracle'
limit_method = LimitMethod.WRAP_SQL
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('minute', _('minute'), "TRUNC(TO_DATE({col}), 'MI')", 'PT1M'),
Grain('hour', _('hour'), "TRUNC(TO_DATE({col}), 'HH')", 'PT1H'),
Grain('day', _('day'), "TRUNC(TO_DATE({col}), 'DDD')", 'P1D'),
Grain('week', _('week'), "TRUNC(TO_DATE({col}), 'WW')", 'P1W'),
Grain('month', _('month'), "TRUNC(TO_DATE({col}), 'MONTH')", 'P1M'),
Grain('quarter', _('quarter'), "TRUNC(TO_DATE({col}), 'Q')", 'P0.25Y'),
Grain('year', _('year'), "TRUNC(TO_DATE({col}), 'YEAR')", 'P1Y'),
)
@classmethod
def convert_dttm(cls, target_type, dttm):
return (
"""TO_TIMESTAMP('{}', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')"""
).format(dttm.isoformat())
@staticmethod
def normalize_column_name(column_name):
return column_name.lower()
class Db2EngineSpec(BaseEngineSpec):
engine = 'ibm_db_sa'
limit_method = LimitMethod.WRAP_SQL
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('second', _('second'),
'CAST({col} as TIMESTAMP)'
' - MICROSECOND({col}) MICROSECONDS',
'PT1S'),
Grain('minute', _('minute'),
'CAST({col} as TIMESTAMP)'
' - SECOND({col}) SECONDS'
' - MICROSECOND({col}) MICROSECONDS',
'PT1M'),
Grain('hour', _('hour'),
'CAST({col} as TIMESTAMP)'
' - MINUTE({col}) MINUTES'
' - SECOND({col}) SECONDS'
' - MICROSECOND({col}) MICROSECONDS ',
'PT1H'),
Grain('day', _('day'),
'CAST({col} as TIMESTAMP)'
' - HOUR({col}) HOURS'
' - MINUTE({col}) MINUTES'
' - SECOND({col}) SECONDS'
' - MICROSECOND({col}) MICROSECONDS ',
'P1D'),
Grain('week', _('week'),
'{col} - (DAYOFWEEK({col})) DAYS',
'P1W'),
Grain('month', _('month'),
'{col} - (DAY({col})-1) DAYS',
'P1M'),
Grain('quarter', _('quarter'),
'{col} - (DAY({col})-1) DAYS'
' - (MONTH({col})-1) MONTHS'
' + ((QUARTER({col})-1) * 3) MONTHS',
'P0.25Y'),
Grain('year', _('year'),
'{col} - (DAY({col})-1) DAYS'
' - (MONTH({col})-1) MONTHS',
'P1Y'),
)
@classmethod
def epoch_to_dttm(cls):
return "(TIMESTAMP('1970-01-01', '00:00:00') + {col} SECONDS)"
@classmethod
def convert_dttm(cls, target_type, dttm):
return "'{}'".format(dttm.strftime('%Y-%m-%d-%H.%M.%S'))
class SqliteEngineSpec(BaseEngineSpec):
engine = 'sqlite'
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('hour', _('hour'),
"DATETIME(STRFTIME('%Y-%m-%dT%H:00:00', {col}))",
'PT1H'),
Grain('day', _('day'), 'DATE({col})', 'P1D'),
Grain('week', _('week'),
"DATE({col}, -strftime('%W', {col}) || ' days')",
'P1W'),
Grain('month', _('month'),
"DATE({col}, -strftime('%d', {col}) || ' days', '+1 day')",
'P1M'),
Grain('year', _('year'),
"DATETIME(STRFTIME('%Y-01-01T00:00:00', {col}))",
'P1Y'),
Grain('week_ending_saturday', _('week_ending_saturday'),
"DATE({col}, 'weekday 6')",
'P1W/1970-01-03T00:00:00Z'),
Grain('week_start_sunday', _('week_start_sunday'),
"DATE({col}, 'weekday 0', '-7 days')",
'1969-12-28T00:00:00Z/P1W'),
)
@classmethod
def epoch_to_dttm(cls):
return "datetime({col}, 'unixepoch')"
@classmethod
@cache_util.memoized_func(
timeout=600,
key=lambda *args, **kwargs: 'db:{}:{}'.format(args[0].id, args[1]))
def fetch_result_sets(cls, db, datasource_type, force=False):
schemas = db.inspector.get_schema_names()
result_sets = {}
all_result_sets = []
schema = schemas[0]
if datasource_type == 'table':
result_sets[schema] = sorted(db.inspector.get_table_names())
elif datasource_type == 'view':
result_sets[schema] = sorted(db.inspector.get_view_names())
all_result_sets += [
'{}.{}'.format(schema, t) for t in result_sets[schema]]
if all_result_sets:
result_sets[''] = all_result_sets
return result_sets
@classmethod
def convert_dttm(cls, target_type, dttm):
iso = dttm.isoformat().replace('T', ' ')
if '.' not in iso:
iso += '.000000'
return "'{}'".format(iso)
@classmethod
def get_table_names(cls, schema, inspector):
"""Need to disregard the schema for Sqlite"""
return sorted(inspector.get_table_names())
class MySQLEngineSpec(BaseEngineSpec):
engine = 'mysql'
cursor_execute_kwargs = {'args': {}}
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('second', _('second'), 'DATE_ADD(DATE({col}), '
'INTERVAL (HOUR({col})*60*60 + MINUTE({col})*60'
' + SECOND({col})) SECOND)',
'PT1S'),
Grain('minute', _('minute'), 'DATE_ADD(DATE({col}), '
'INTERVAL (HOUR({col})*60 + MINUTE({col})) MINUTE)',
'PT1M'),
Grain('hour', _('hour'), 'DATE_ADD(DATE({col}), '
'INTERVAL HOUR({col}) HOUR)',
'PT1H'),
Grain('day', _('day'), 'DATE({col})', 'P1D'),
Grain('week', _('week'), 'DATE(DATE_SUB({col}, '
'INTERVAL DAYOFWEEK({col}) - 1 DAY))',
'P1W'),
Grain('month', _('month'), 'DATE(DATE_SUB({col}, '
'INTERVAL DAYOFMONTH({col}) - 1 DAY))',
'P1M'),
Grain('quarter', _('quarter'), 'MAKEDATE(YEAR({col}), 1) '
'+ INTERVAL QUARTER({col}) QUARTER - INTERVAL 1 QUARTER',
'P0.25Y'),
Grain('year', _('year'), 'DATE(DATE_SUB({col}, '
'INTERVAL DAYOFYEAR({col}) - 1 DAY))',
'P1Y'),
Grain('week_start_monday', _('week_start_monday'),
'DATE(DATE_SUB({col}, '
'INTERVAL DAYOFWEEK(DATE_SUB({col}, INTERVAL 1 DAY)) - 1 DAY))',
'P1W'),
)
@classmethod
def convert_dttm(cls, target_type, dttm):
if target_type.upper() in ('DATETIME', 'DATE'):
return "STR_TO_DATE('{}', '%Y-%m-%d %H:%i:%s')".format(
dttm.strftime('%Y-%m-%d %H:%M:%S'))
return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
@classmethod
def adjust_database_uri(cls, uri, selected_schema=None):
if selected_schema:
uri.database = selected_schema
return uri
@classmethod
def epoch_to_dttm(cls):
return 'from_unixtime({col})'
@classmethod
def extract_error_message(cls, e):
"""Extract error message for queries"""
message = str(e)
try:
if isinstance(e.args, tuple) and len(e.args) > 1:
message = e.args[1]
except Exception:
pass
return message
class PrestoEngineSpec(BaseEngineSpec):
engine = 'presto'
cursor_execute_kwargs = {'parameters': None}
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('second', _('second'),
"date_trunc('second', CAST({col} AS TIMESTAMP))",
'PT1S'),
Grain('minute', _('minute'),
"date_trunc('minute', CAST({col} AS TIMESTAMP))",
'PT1M'),
Grain('hour', _('hour'),
"date_trunc('hour', CAST({col} AS TIMESTAMP))",
'PT1H'),
Grain('day', _('day'),
"date_trunc('day', CAST({col} AS TIMESTAMP))",
'P1D'),
Grain('week', _('week'),
"date_trunc('week', CAST({col} AS TIMESTAMP))",
'P1W'),
Grain('month', _('month'),
"date_trunc('month', CAST({col} AS TIMESTAMP))",
'P1M'),
Grain('quarter', _('quarter'),
"date_trunc('quarter', CAST({col} AS TIMESTAMP))",
'P0.25Y'),
Grain('week_ending_saturday', _('week_ending_saturday'),
"date_add('day', 5, date_trunc('week', date_add('day', 1, "
'CAST({col} AS TIMESTAMP))))',
'P1W/1970-01-03T00:00:00Z'),
Grain('week_start_sunday', _('week_start_sunday'),
"date_add('day', -1, date_trunc('week', "
"date_add('day', 1, CAST({col} AS TIMESTAMP))))",
'1969-12-28T00:00:00Z/P1W'),
Grain('year', _('year'),
"date_trunc('year', CAST({col} AS TIMESTAMP))",
'P1Y'),
)
@classmethod
def adjust_database_uri(cls, uri, selected_schema=None):
database = uri.database
if selected_schema and database:
if '/' in database:
database = database.split('/')[0] + '/' + selected_schema
else:
database += '/' + selected_schema
uri.database = database
return uri
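# Hedged example (editorial) of the schema override above: with a URI whose
# database component is 'hive/default' and selected_schema='analytics', the
# database component becomes 'hive/analytics'; with just 'hive' (no slash) it
# also becomes 'hive/analytics'.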
@classmethod
def convert_dttm(cls, target_type, dttm):
tt = target_type.upper()
if tt == 'DATE':
return "from_iso8601_date('{}')".format(dttm.isoformat()[:10])
if tt == 'TIMESTAMP':
return "from_iso8601_timestamp('{}')".format(dttm.isoformat())
return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
@classmethod
def epoch_to_dttm(cls):
return 'from_unixtime({col})'
@classmethod
@cache_util.memoized_func(
timeout=600,
key=lambda *args, **kwargs: 'db:{}:{}'.format(args[0].id, args[1]))
def fetch_result_sets(cls, db, datasource_type, force=False):
"""Returns the dictionary {schema : [result_set_name]}.
Datasource_type can be 'table' or 'view'.
Empty schema corresponds to the list of full names of the all
tables or views: <schema>.<result_set_name>.
"""
result_set_df = db.get_df(
"""SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S
ORDER BY concat(table_schema, '.', table_name)""".format(
datasource_type.upper(),
),
None)
result_sets = defaultdict(list)
for unused, row in result_set_df.iterrows():
result_sets[row['table_schema']].append(row['table_name'])
result_sets[''].append('{}.{}'.format(
row['table_schema'], row['table_name']))
return result_sets
@classmethod
def extra_table_metadata(cls, database, table_name, schema_name):
indexes = database.get_indexes(table_name, schema_name)
if not indexes:
return {}
cols = indexes[0].get('column_names', [])
full_table_name = table_name
if schema_name and '.' not in table_name:
full_table_name = '{}.{}'.format(schema_name, table_name)
pql = cls._partition_query(full_table_name)
col_name, latest_part = cls.latest_partition(
table_name, schema_name, database, show_first=True)
return {
'partitions': {
'cols': cols,
'latest': {col_name: latest_part},
'partitionQuery': pql,
},
}
@classmethod
def handle_cursor(cls, cursor, query, session):
"""Updates progress information"""
logging.info('Polling the cursor for progress')
polled = cursor.poll()
# poll returns dict -- JSON status information or ``None``
# if the query is done
# https://github.com/dropbox/PyHive/blob/
# b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178
while polled:
# Update the object and wait for the kill signal.
stats = polled.get('stats', {})
query = session.query(type(query)).filter_by(id=query.id).one()
if query.status in [QueryStatus.STOPPED, QueryStatus.TIMED_OUT]:
cursor.cancel()
break
if stats:
state = stats.get('state')
# if already finished, then stop polling
if state == 'FINISHED':
break
completed_splits = float(stats.get('completedSplits'))
total_splits = float(stats.get('totalSplits'))
if total_splits and completed_splits:
progress = 100 * (completed_splits / total_splits)
logging.info(
'Query progress: {} / {} '
'splits'.format(completed_splits, total_splits))
if progress > query.progress:
query.progress = progress
session.commit()
time.sleep(1)
logging.info('Polling the cursor for progress')
polled = cursor.poll()
@classmethod
def extract_error_message(cls, e):
if (
hasattr(e, 'orig') and
type(e.orig).__name__ == 'DatabaseError' and
isinstance(e.orig[0], dict)):
error_dict = e.orig[0]
return '{} at {}: {}'.format(
error_dict.get('errorName'),
error_dict.get('errorLocation'),
error_dict.get('message'),
)
if (
type(e).__name__ == 'DatabaseError' and
hasattr(e, 'args') and
len(e.args) > 0
):
error_dict = e.args[0]
return error_dict.get('message')
return utils.error_msg_from_exception(e)
@classmethod
def _partition_query(
cls, table_name, limit=0, order_by=None, filters=None):
"""Returns a partition query
:param table_name: the name of the table to get partitions from
:type table_name: str
:param limit: the number of partitions to be returned
:type limit: int
:param order_by: a list of tuples of field name and a boolean
that determines if that field should be sorted in descending
order
:type order_by: list of (str, bool) tuples
:param filters: a list of filters to apply
:param filters: dict of field name and filter value combinations
"""
limit_clause = 'LIMIT {}'.format(limit) if limit else ''
order_by_clause = ''
if order_by:
l = [] # noqa: E741
for field, desc in order_by:
l.append(field + (' DESC' if desc else ''))
order_by_clause = 'ORDER BY ' + ', '.join(l)
where_clause = ''
if filters:
l = [] # noqa: E741
for field, value in filters.items():
l.append("{field} = '{value}'".format(**locals()))
where_clause = 'WHERE ' + ' AND '.join(l)
sql = textwrap.dedent("""\
SHOW PARTITIONS FROM {table_name}
{where_clause}
{order_by_clause}
{limit_clause}
""").format(**locals())
return sql
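# Hedged example (editorial) of the SQL rendered by _partition_query above for
# an illustrative call (table and column names are made up):
#   _partition_query('logs', limit=1, order_by=[('ds', True)],
#                    filters={'event_type': 'click'})
# produces, after dedenting:
#   SHOW PARTITIONS FROM logs
#   WHERE event_type = 'click'
#   ORDER BY ds DESC
#   LIMIT 1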
@classmethod
def _latest_partition_from_df(cls, df):
recs = df.to_records(index=False)
if recs:
return recs[0][0]
@classmethod
def latest_partition(cls, table_name, schema, database, show_first=False):
"""Returns col name and the latest (max) partition value for a table
:param table_name: the name of the table
:type table_name: str
:param schema: schema / database / namespace
:type schema: str
:param database: database query will be run against
:type database: models.Database
:param show_first: displays the value for the first partitioning key
if there are many partitioning keys
:type show_first: bool
>>> latest_partition('foo_table')
'2018-01-01'
"""
indexes = database.get_indexes(table_name, schema)
if len(indexes[0]['column_names']) < 1:
raise SupersetTemplateException(
'The table should have one partitioned field')
elif not show_first and len(indexes[0]['column_names']) > 1:
raise SupersetTemplateException(
'The table should have a single partitioned field '
'to use this function. You may want to use '
'`presto.latest_sub_partition`')
part_field = indexes[0]['column_names'][0]
sql = cls._partition_query(table_name, 1, [(part_field, True)])
df = database.get_df(sql, schema)
return part_field, cls._latest_partition_from_df(df)
@classmethod
def latest_sub_partition(cls, table_name, schema, database, **kwargs):
"""Returns the latest (max) partition value for a table
A filtering criteria should be passed for all fields that are
partitioned except for the field to be returned. For example,
if a table is partitioned by (``ds``, ``event_type`` and
``event_category``) and you want the latest ``ds``, you'll want
to provide a filter as keyword arguments for both
``event_type`` and ``event_category`` as in
``latest_sub_partition('my_table',
event_category='page', event_type='click')``
:param table_name: the name of the table, can be just the table
name or a fully qualified table name as ``schema_name.table_name``
:type table_name: str
:param schema: schema / database / namespace
:type schema: str
:param database: database query will be run against
:type database: models.Database
:param kwargs: keyword arguments define the filtering criteria
on the partition list. There can be many of these.
:type kwargs: str
>>> latest_sub_partition('sub_partition_table', event_type='click')
'2018-01-01'
"""
indexes = database.get_indexes(table_name, schema)
part_fields = indexes[0]['column_names']
for k in kwargs.keys():
if k not in part_fields:
msg = 'Field [{k}] is not part of the partitioning key'.format(k=k)
raise SupersetTemplateException(msg)
if len(kwargs.keys()) != len(part_fields) - 1:
msg = (
'A filter needs to be specified for {} out of the '
'{} fields.'
).format(len(part_fields) - 1, len(part_fields))
raise SupersetTemplateException(msg)
for field in part_fields:
if field not in kwargs.keys():
field_to_return = field
sql = cls._partition_query(
table_name, 1, [(field_to_return, True)], kwargs)
df = database.get_df(sql, schema)
if df.empty:
return ''
return df.to_dict()[field_to_return][0]
class HiveEngineSpec(PrestoEngineSpec):
"""Reuses PrestoEngineSpec functionality."""
engine = 'hive'
cursor_execute_kwargs = {'async': True}
# Scoping regex at class level to avoid recompiling
# 17/02/07 19:36:38 INFO ql.Driver: Total jobs = 5
jobs_stats_r = re.compile(
r'.*INFO.*Total jobs = (?P<max_jobs>[0-9]+)')
# 17/02/07 19:37:08 INFO ql.Driver: Launching Job 2 out of 5
launching_job_r = re.compile(
'.*INFO.*Launching Job (?P<job_number>[0-9]+) out of '
'(?P<max_jobs>[0-9]+)')
# 17/02/07 19:36:58 INFO exec.Task: 2017-02-07 19:36:58,152 Stage-18
# map = 0%, reduce = 0%
stage_progress_r = re.compile(
r'.*INFO.*Stage-(?P<stage_number>[0-9]+).*'
r'map = (?P<map_progress>[0-9]+)%.*'
r'reduce = (?P<reduce_progress>[0-9]+)%.*')
@classmethod
def patch(cls):
from pyhive import hive
from superset.db_engines import hive as patched_hive
from TCLIService import (
constants as patched_constants,
ttypes as patched_ttypes,
TCLIService as patched_TCLIService)
hive.TCLIService = patched_TCLIService
hive.constants = patched_constants
hive.ttypes = patched_ttypes
hive.Cursor.fetch_logs = patched_hive.fetch_logs
@classmethod
@cache_util.memoized_func(
timeout=600,
key=lambda *args, **kwargs: 'db:{}:{}'.format(args[0].id, args[1]))
def fetch_result_sets(cls, db, datasource_type, force=False):
return BaseEngineSpec.fetch_result_sets(
db, datasource_type, force=force)
@classmethod
def fetch_data(cls, cursor, limit):
from TCLIService import ttypes
state = cursor.poll()
if state.operationState == ttypes.TOperationState.ERROR_STATE:
raise Exception('Query error', state.errorMessage)
return super(HiveEngineSpec, cls).fetch_data(cursor, limit)
@staticmethod
def create_table_from_csv(form, table):
"""Uploads a csv file and creates a superset datasource in Hive."""
def convert_to_hive_type(col_type):
"""maps tableschema's types to hive types"""
tableschema_to_hive_types = {
'boolean': 'BOOLEAN',
'integer': 'INT',
'number': 'DOUBLE',
'string': 'STRING',
}
return tableschema_to_hive_types.get(col_type, 'STRING')
table_name = form.name.data
if config.get('UPLOADED_CSV_HIVE_NAMESPACE'):
if '.' in table_name:
raise Exception(
"You can't specify a namespace. "
'All tables will be uploaded to the `{}` namespace'.format(
config.get('UPLOADED_CSV_HIVE_NAMESPACE')))
table_name = '{}.{}'.format(
config.get('UPLOADED_CSV_HIVE_NAMESPACE'), table_name)
filename = form.csv_file.data.filename
bucket_path = config['CSV_TO_HIVE_UPLOAD_S3_BUCKET']
if not bucket_path:
logging.info('No upload bucket specified')
raise Exception(
'No upload bucket specified. You can specify one in the config file.')
table_name = form.name.data
filename = form.csv_file.data.filename
upload_prefix = config['CSV_TO_HIVE_UPLOAD_DIRECTORY']
upload_path = config['UPLOAD_FOLDER'] + \
secure_filename(form.csv_file.data.filename)
hive_table_schema = Table(upload_path).infer()
column_name_and_type = []
for column_info in hive_table_schema['fields']:
column_name_and_type.append(
'{} {}'.format(
column_info['name'], convert_to_hive_type(column_info['type'])))
schema_definition = ', '.join(column_name_and_type)
s3 = boto3.client('s3')
location = os.path.join('s3a://', bucket_path, upload_prefix, table_name)
s3.upload_file(
upload_path, bucket_path,
os.path.join(upload_prefix, table_name, filename))
sql = """CREATE TABLE {table_name} ( {schema_definition} )
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS
TEXTFILE LOCATION '{location}'
tblproperties ('skip.header.line.count'='1')""".format(**locals())
logging.info(form.con.data)
engine = create_engine(form.con.data.sqlalchemy_uri_decrypted)
engine.execute(sql)
@classmethod
def convert_dttm(cls, target_type, dttm):
tt = target_type.upper()
if tt == 'DATE':
return "CAST('{}' AS DATE)".format(dttm.isoformat()[:10])
elif tt == 'TIMESTAMP':
return "CAST('{}' AS TIMESTAMP)".format(
dttm.strftime('%Y-%m-%d %H:%M:%S'))
return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
@classmethod
def adjust_database_uri(cls, uri, selected_schema=None):
if selected_schema:
uri.database = selected_schema
return uri
@classmethod
def extract_error_message(cls, e):
try:
msg = e.message.status.errorMessage
except Exception:
msg = str(e)
return msg
@classmethod
def progress(cls, log_lines):
total_jobs = 1 # assuming there's at least 1 job
current_job = 1
stages = {}
for line in log_lines:
match = cls.jobs_stats_r.match(line)
if match:
total_jobs = int(match.groupdict()['max_jobs']) or 1
match = cls.launching_job_r.match(line)
if match:
current_job = int(match.groupdict()['job_number'])
total_jobs = int(match.groupdict()['max_jobs']) or 1
stages = {}
match = cls.stage_progress_r.match(line)
if match:
stage_number = int(match.groupdict()['stage_number'])
map_progress = int(match.groupdict()['map_progress'])
reduce_progress = int(match.groupdict()['reduce_progress'])
stages[stage_number] = (map_progress + reduce_progress) / 2
logging.info(
'Progress detail: {}, '
'current job {}, '
'total jobs: {}'.format(stages, current_job, total_jobs))
stage_progress = sum(
stages.values()) / len(stages.values()) if stages else 0
progress = (
100 * (current_job - 1) / total_jobs + stage_progress / total_jobs
)
return int(progress)
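# Hedged worked example (editorial) of the progress arithmetic above: with
# total_jobs=2, current_job=2 and a single stage at map=60% / reduce=40%
# (stage progress 50), the reported progress is
#   100 * (2 - 1) / 2 + 50 / 2 = 75
# i.e. completed jobs count fully and the running job contributes its averaged
# stage progress.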
@classmethod
def get_tracking_url(cls, log_lines):
lkp = 'Tracking URL = '
for line in log_lines:
if lkp in line:
return line.split(lkp)[1]
@classmethod
def handle_cursor(cls, cursor, query, session):
"""Updates progress information"""
from pyhive import hive
unfinished_states = (
hive.ttypes.TOperationState.INITIALIZED_STATE,
hive.ttypes.TOperationState.RUNNING_STATE,
)
polled = cursor.poll()
last_log_line = 0
tracking_url = None
job_id = None
while polled.operationState in unfinished_states:
query = session.query(type(query)).filter_by(id=query.id).one()
if query.status == QueryStatus.STOPPED:
cursor.cancel()
break
log = cursor.fetch_logs() or ''
if log:
log_lines = log.splitlines()
progress = cls.progress(log_lines)
logging.info('Progress total: {}'.format(progress))
needs_commit = False
if progress > query.progress:
query.progress = progress
needs_commit = True
if not tracking_url:
tracking_url = cls.get_tracking_url(log_lines)
if tracking_url:
job_id = tracking_url.split('/')[-2]
logging.info(
'Found the tracking url: {}'.format(tracking_url))
tracking_url = tracking_url_trans(tracking_url)
logging.info(
'Transformation applied: {}'.format(tracking_url))
query.tracking_url = tracking_url
logging.info('Job id: {}'.format(job_id))
needs_commit = True
if job_id and len(log_lines) > last_log_line:
# Wait for job id before logging things out
# this allows for prefixing all log lines and becoming
# searchable in something like Kibana
for l in log_lines[last_log_line:]:
logging.info('[{}] {}'.format(job_id, l))
last_log_line = len(log_lines)
if needs_commit:
session.commit()
time.sleep(hive_poll_interval)
polled = cursor.poll()
@classmethod
def where_latest_partition(
cls, table_name, schema, database, qry, columns=None):
try:
col_name, value = cls.latest_partition(
table_name, schema, database)
except Exception:
# table is not partitioned
return False
for c in columns:
if str(c.name) == str(col_name):
return qry.where(c == str(value))
return False
@classmethod
def latest_sub_partition(cls, table_name, schema, database, **kwargs):
# TODO(bogdan): implement
pass
@classmethod
def _latest_partition_from_df(cls, df):
"""Hive partitions look like ds={partition name}"""
return df.ix[:, 0].max().split('=')[1]
@classmethod
def _partition_query(
cls, table_name, limit=0, order_by=None, filters=None):
return 'SHOW PARTITIONS {table_name}'.format(**locals())
@classmethod
def modify_url_for_impersonation(cls, url, impersonate_user, username):
"""
Modify the SQL Alchemy URL object with the user to impersonate if applicable.
:param url: SQLAlchemy URL object
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
"""
# Do nothing in the URL object since instead this should modify
# the configuration dictionary. See get_configuration_for_impersonation
pass
@classmethod
def get_configuration_for_impersonation(cls, uri, impersonate_user, username):
"""
Return a configuration dictionary that can be merged with other configs
that can set the correct properties for impersonating users
:param uri: URI string
:param impersonate_user: Bool indicating if impersonation is enabled
:param username: Effective username
:return: Dictionary with configs required for impersonation
"""
configuration = {}
url = make_url(uri)
backend_name = url.get_backend_name()
# Must be Hive connection, enable impersonation, and set param auth=LDAP|KERBEROS
if (backend_name == 'hive' and 'auth' in url.query.keys() and
impersonate_user is True and username is not None):
configuration['hive.server2.proxy.user'] = username
return configuration
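# Hedged example (editorial) of the impersonation config above: for a URI such
# as 'hive://host:10000/default?auth=KERBEROS' with impersonate_user=True and
# username='alice', the returned dictionary is
#   {'hive.server2.proxy.user': 'alice'}
# and it stays empty when the auth parameter is absent or impersonation is off.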
class MssqlEngineSpec(BaseEngineSpec):
engine = 'mssql'
epoch_to_dttm = "dateadd(S, {col}, '1970-01-01')"
limit_method = LimitMethod.WRAP_SQL
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('second', _('second'), 'DATEADD(second, '
"DATEDIFF(second, '2000-01-01', {col}), '2000-01-01')",
'PT1S'),
Grain('minute', _('minute'), 'DATEADD(minute, '
'DATEDIFF(minute, 0, {col}), 0)',
'PT1M'),
Grain('5 minute', _('5 minute'), 'DATEADD(minute, '
'DATEDIFF(minute, 0, {col}) / 5 * 5, 0)',
'PT5M'),
Grain('half hour', _('half hour'), 'DATEADD(minute, '
'DATEDIFF(minute, 0, {col}) / 30 * 30, 0)',
'PT0.5H'),
Grain('hour', _('hour'), 'DATEADD(hour, '
'DATEDIFF(hour, 0, {col}), 0)',
'PT1H'),
Grain('day', _('day'), 'DATEADD(day, '
'DATEDIFF(day, 0, {col}), 0)',
'P1D'),
Grain('week', _('week'), 'DATEADD(week, '
'DATEDIFF(week, 0, {col}), 0)',
'P1W'),
Grain('month', _('month'), 'DATEADD(month, '
'DATEDIFF(month, 0, {col}), 0)',
'P1M'),
Grain('quarter', _('quarter'), 'DATEADD(quarter, '
'DATEDIFF(quarter, 0, {col}), 0)',
'P0.25Y'),
Grain('year', _('year'), 'DATEADD(year, '
'DATEDIFF(year, 0, {col}), 0)',
'P1Y'),
)
@classmethod
def convert_dttm(cls, target_type, dttm):
return "CONVERT(DATETIME, '{}', 126)".format(dttm.isoformat())
class AthenaEngineSpec(BaseEngineSpec):
engine = 'awsathena'
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('second', _('second'),
"date_trunc('second', CAST({col} AS TIMESTAMP))",
'PT1S'),
Grain('minute', _('minute'),
"date_trunc('minute', CAST({col} AS TIMESTAMP))",
'PT1M'),
Grain('hour', _('hour'),
"date_trunc('hour', CAST({col} AS TIMESTAMP))",
'PT1H'),
Grain('day', _('day'),
"date_trunc('day', CAST({col} AS TIMESTAMP))",
'P1D'),
Grain('week', _('week'),
"date_trunc('week', CAST({col} AS TIMESTAMP))",
'P1W'),
Grain('month', _('month'),
"date_trunc('month', CAST({col} AS TIMESTAMP))",
'P1M'),
Grain('quarter', _('quarter'),
"date_trunc('quarter', CAST({col} AS TIMESTAMP))",
'P0.25Y'),
Grain('week_ending_saturday', _('week_ending_saturday'),
"date_add('day', 5, date_trunc('week', date_add('day', 1, "
'CAST({col} AS TIMESTAMP))))',
'P1W/1970-01-03T00:00:00Z'),
Grain('week_start_sunday', _('week_start_sunday'),
"date_add('day', -1, date_trunc('week', "
"date_add('day', 1, CAST({col} AS TIMESTAMP))))",
'1969-12-28T00:00:00Z/P1W'),
)
@classmethod
def convert_dttm(cls, target_type, dttm):
tt = target_type.upper()
if tt == 'DATE':
return "from_iso8601_date('{}')".format(dttm.isoformat()[:10])
if tt == 'TIMESTAMP':
return "from_iso8601_timestamp('{}')".format(dttm.isoformat())
return ("CAST ('{}' AS TIMESTAMP)"
.format(dttm.strftime('%Y-%m-%d %H:%M:%S')))
@classmethod
def epoch_to_dttm(cls):
return 'from_unixtime({col})'
class ClickHouseEngineSpec(BaseEngineSpec):
"""Dialect for ClickHouse analytical DB."""
engine = 'clickhouse'
time_secondary_columns = True
time_groupby_inline = True
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('minute', _('minute'),
'toStartOfMinute(toDateTime({col}))',
'PT1M'),
Grain('5 minute', _('5 minute'),
'toDateTime(intDiv(toUInt32(toDateTime({col})), 300)*300)',
'PT5M'),
Grain('10 minute', _('10 minute'),
'toDateTime(intDiv(toUInt32(toDateTime({col})), 600)*600)',
'PT10M'),
Grain('hour', _('hour'),
'toStartOfHour(toDateTime({col}))',
'PT1H'),
Grain('day', _('day'),
'toStartOfDay(toDateTime({col}))',
'P1D'),
Grain('month', _('month'),
'toStartOfMonth(toDateTime({col}))',
'P1M'),
Grain('quarter', _('quarter'),
'toStartOfQuarter(toDateTime({col}))',
'P0.25Y'),
Grain('year', _('year'),
'toStartOfYear(toDateTime({col}))',
'P1Y'),
)
@classmethod
def convert_dttm(cls, target_type, dttm):
tt = target_type.upper()
if tt == 'DATE':
return "toDate('{}')".format(dttm.strftime('%Y-%m-%d'))
if tt == 'DATETIME':
return "toDateTime('{}')".format(
dttm.strftime('%Y-%m-%d %H:%M:%S'))
return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
class BQEngineSpec(BaseEngineSpec):
"""Engine spec for Google's BigQuery
As contributed by @mxmzdlv on issue #945"""
engine = 'bigquery'
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('second', _('second'), 'TIMESTAMP_TRUNC({col}, SECOND)', 'PT1S'),
Grain('minute', _('minute'), 'TIMESTAMP_TRUNC({col}, MINUTE)', 'PT1M'),
Grain('hour', _('hour'), 'TIMESTAMP_TRUNC({col}, HOUR)', 'PT1H'),
Grain('day', _('day'), 'TIMESTAMP_TRUNC({col}, DAY)', 'P1D'),
Grain('week', _('week'), 'TIMESTAMP_TRUNC({col}, WEEK)', 'P1W'),
Grain('month', _('month'), 'TIMESTAMP_TRUNC({col}, MONTH)', 'P1M'),
Grain('quarter', _('quarter'),
'TIMESTAMP_TRUNC({col}, QUARTER)', 'P0.25Y'),
Grain('year', _('year'), 'TIMESTAMP_TRUNC({col}, YEAR)', 'P1Y'),
)
@classmethod
def convert_dttm(cls, target_type, dttm):
tt = target_type.upper()
if tt == 'DATE':
return "'{}'".format(dttm.strftime('%Y-%m-%d'))
return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
@classmethod
def fetch_data(cls, cursor, limit):
data = super(BQEngineSpec, cls).fetch_data(cursor, limit)
if len(data) != 0 and type(data[0]).__name__ == 'Row':
data = [r.values() for r in data]
return data
class ImpalaEngineSpec(BaseEngineSpec):
"""Engine spec for Cloudera's Impala"""
engine = 'impala'
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('minute', _('minute'), "TRUNC({col}, 'MI')", 'PT1M'),
Grain('hour', _('hour'), "TRUNC({col}, 'HH')", 'PT1H'),
Grain('day', _('day'), "TRUNC({col}, 'DD')", 'P1D'),
Grain('week', _('week'), "TRUNC({col}, 'WW')", 'P1W'),
Grain('month', _('month'), "TRUNC({col}, 'MONTH')", 'P1M'),
Grain('quarter', _('quarter'), "TRUNC({col}, 'Q')", 'P0.25Y'),
Grain('year', _('year'), "TRUNC({col}, 'YYYY')", 'P1Y'),
)
@classmethod
def convert_dttm(cls, target_type, dttm):
tt = target_type.upper()
if tt == 'DATE':
return "'{}'".format(dttm.strftime('%Y-%m-%d'))
return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
@classmethod
def get_schema_names(cls, inspector):
schemas = [row[0] for row in inspector.engine.execute('SHOW SCHEMAS')
if not row[0].startswith('_')]
return schemas
class DruidEngineSpec(BaseEngineSpec):
"""Engine spec for Druid.io"""
engine = 'druid'
inner_joins = False
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('second', _('second'), 'FLOOR({col} TO SECOND)', 'PT1S'),
Grain('minute', _('minute'), 'FLOOR({col} TO MINUTE)', 'PT1M'),
Grain('hour', _('hour'), 'FLOOR({col} TO HOUR)', 'PT1H'),
Grain('day', _('day'), 'FLOOR({col} TO DAY)', 'P1D'),
Grain('week', _('week'), 'FLOOR({col} TO WEEK)', 'P1W'),
Grain('month', _('month'), 'FLOOR({col} TO MONTH)', 'P1M'),
Grain('quarter', _('quarter'), 'FLOOR({col} TO QUARTER)', 'P3M'),
Grain('year', _('year'), 'FLOOR({col} TO YEAR)', 'P1Y'),
)
class KylinEngineSpec(BaseEngineSpec):
"""Dialect for Apache Kylin"""
engine = 'kylin'
time_grains = (
Grain('Time Column', _('Time Column'), '{col}', None),
Grain('second', _('second'),
'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO SECOND) AS TIMESTAMP)',
'PT1S'),
Grain('minute', _('minute'),
'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MINUTE) AS TIMESTAMP)',
'PT1M'),
Grain('hour', _('hour'),
'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO HOUR) AS TIMESTAMP)',
'PT1H'),
Grain('day', _('day'),
'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO DAY) AS DATE)',
'P1D'),
Grain('week', _('week'),
'CAST(TIMESTAMPADD(WEEK, WEEK(CAST({col} AS DATE)) - 1, \
FLOOR(CAST({col} AS TIMESTAMP) TO YEAR)) AS DATE)',
'P1W'),
Grain('month', _('month'),
'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MONTH) AS DATE)',
'P1M'),
Grain('quarter', _('quarter'),
'CAST(TIMESTAMPADD(QUARTER, QUARTER(CAST({col} AS DATE)) - 1, \
FLOOR(CAST({col} AS TIMESTAMP) TO YEAR)) AS DATE)',
'P0.25Y'),
Grain('year', _('year'),
'CAST(FLOOR(CAST({col} AS TIMESTAMP) TO YEAR) AS DATE)',
'P1Y'),
)
@classmethod
def convert_dttm(cls, target_type, dttm):
tt = target_type.upper()
if tt == 'DATE':
return "CAST('{}' AS DATE)".format(dttm.isoformat()[:10])
if tt == 'TIMESTAMP':
return "CAST('{}' AS TIMESTAMP)".format(
dttm.strftime('%Y-%m-%d %H:%M:%S'))
return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
engines = {
o.engine: o for o in globals().values()
if inspect.isclass(o) and issubclass(o, BaseEngineSpec)}
| apache-2.0 |
aetilley/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. In the high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but they can be very
expensive to run, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
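# Editorial summary (hedged; the notation is ours, not taken from the cited
# papers) of the update loop in ``fit`` above: each iteration refreshes the
# label distributions Y as
#     Y <- clamp_weights * (graph_matrix @ Y) + y_static
# where clamp_weights is 1 for labeled rows and ``alpha`` for unlabeled rows,
# and y_static holds (1 - alpha) times the original one-hot labels on labeled
# rows (and zeros on unlabeled rows) whenever alpha > 0.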
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses an affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
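# --- Illustrative sketch (not part of the original module) -----------------
# A minimal, hypothetical check of the graph built above: negating the
# normalized Laplacian and zeroing its diagonal leaves D^{-1/2} W D^{-1/2}
# off the diagonal.  The toy affinity matrix W is invented, and
# ``scipy.sparse.csgraph.laplacian`` stands in here for the module's
# ``graph_laplacian`` helper.
if __name__ == "__main__":
    import numpy as np
    from scipy.sparse import csgraph

    W = np.array([[0., 2., 1.],
                  [2., 0., 0.],
                  [1., 0., 0.]])
    S = -csgraph.laplacian(W, normed=True)
    np.fill_diagonal(S, 0.0)

    d = W.sum(axis=0)
    assert np.allclose(S, W / np.sqrt(np.outer(d, d)))
    print(np.round(S, 3))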
| bsd-3-clause |
jaidevd/scikit-learn | sklearn/preprocessing/data.py | 5 | 68210 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# Giorgio Patrini <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
from itertools import combinations_with_replacement as combinations_w_r
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import bincount
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
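# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical example of the helper above: a zero scale (constant feature)
# is replaced by 1.0 so that dividing by it becomes a no-op instead of a
# division by zero.
if __name__ == "__main__":
    import numpy as np
    raw_scale = np.array([0.0, 2.0, 0.5])
    print(_handle_zeros_in_scale(raw_scale))   # the zero becomes 1.0,
                                               # the other entries are unchanged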
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
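# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical example: after ``scale`` every column of the toy array has
# (up to floating point error) zero mean and unit standard deviation.
if __name__ == "__main__":
    import numpy as np
    X_demo = np.array([[1., 10.],
                       [2., 20.],
                       [3., 30.]])
    X_scaled = scale(X_demo)
    print(np.round(X_scaled.mean(axis=0), 6))   # ~ [0. 0.]
    print(np.round(X_scaled.std(axis=0), 6))    # ~ [1. 1.]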
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_*
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_*
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_*
See also
--------
minmax_scale: Equivalent function without the object oriented API.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, dtype=FLOAT_DTYPES)
X -= self.min_
X /= self.scale_
return X
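# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical example of the transformation documented above: each column is
# mapped onto [0, 1] via (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)).
if __name__ == "__main__":
    import numpy as np
    X_demo = np.array([[1., 200.],
                       [2., 400.],
                       [4., 800.]])
    scaler = MinMaxScaler(feature_range=(0, 1))
    X_scaled = scaler.fit_transform(X_demo)
    print(X_scaled)   # rows map to 0, 1/3 and 1 in both columns
    assert np.allclose(scaler.inverse_transform(X_scaled), X_demo)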
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_*
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_and_var
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
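# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical example: fitting in two ``partial_fit`` batches yields the same
# statistics as a single ``fit`` on the concatenated data, which is the point
# of the incremental mean/variance update referenced above.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    A, B = rng.randn(20, 3), rng.randn(30, 3)

    incremental = StandardScaler().partial_fit(A).partial_fit(B)
    batch = StandardScaler().fit(np.vstack([A, B]))
    assert np.allclose(incremental.mean_, batch.mean_)
    assert np.allclose(incremental.var_, batch.var_)
    print("incremental and batch statistics agree")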
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
maxabs_scale: Equivalent function without the object oriented API.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature maximum absolute value
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature maximum absolute value
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
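# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical example: each column is divided by its maximum absolute value,
# so the result lies in [-1, 1] and zero entries stay zero.
if __name__ == "__main__":
    import numpy as np
    X_demo = np.array([[1., -8.],
                       [-2., 4.],
                       [4., 0.]])
    print(MaxAbsScaler().fit_transform(X_demo))   # columns divided by 4 and 8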
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
q = np.percentile(X, self.quantile_range, axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
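# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical example: the column is centered by its median and scaled by its
# interquartile range, so the single large outlier barely affects the learned
# statistics, unlike mean/standard-deviation scaling.
if __name__ == "__main__":
    import numpy as np
    x = np.array([[1.], [2.], [3.], [4.], [1000.]])
    scaler = RobustScaler().fit(x)
    print(scaler.center_, scaler.scale_)   # median 3.0, IQR 2.0
    print(scaler.transform(x).ravel())     # the outlier stays an outlier,
                                           # the other values lie near zero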
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to the interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
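# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical example: for two inputs and degree=2 the generated columns are
# 1, x0, x1, x0^2, x0*x1, x1^2, which is what ``powers_`` and
# ``get_feature_names`` report after fitting on any two-column array.
if __name__ == "__main__":
    import numpy as np
    poly = PolynomialFeatures(degree=2).fit(np.zeros((1, 2)))
    print(poly.get_feature_names())   # ['1', 'x0', 'x1', 'x0^2', 'x0 x1', 'x1^2']
    print(poly.powers_)               # exponent of each input in each output column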
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
Returns
-------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Normalized input X.
norms : array, shape [n_samples] if axis=1 else [n_features]
An array of norms along given axis for X.
When X is sparse, a NotImplementedError will be raised
for norm 'l1' or 'l2'.
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if return_norm and norm in ('l1', 'l2'):
raise NotImplementedError("return_norm=True is not implemented "
"for sparse matrices with norm 'l1' "
"or norm 'l2'")
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms_elementwise = norms.repeat(np.diff(X.indptr))
mask = norms_elementwise != 0
X.data[mask] /= norms_elementwise[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
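# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical example of the three supported norms on a single row vector.
if __name__ == "__main__":
    import numpy as np
    v = np.array([[3., 4., 0.]])
    print(normalize(v, norm='l2'))    # divided by 5 -> [0.6, 0.8, 0.0]
    print(normalize(v, norm='l1'))    # divided by 7, entries sum to 1
    print(normalize(v, norm='max'))   # divided by the maximum, 4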
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
normalize: Equivalent function without the object oriented API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
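# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical example: values strictly greater than the threshold become 1,
# everything else (including the threshold itself) becomes 0.
if __name__ == "__main__":
    import numpy as np
    X_demo = np.array([[0.2, -1.0, 2.5],
                       [1.0, 0.5, 0.4]])
    print(binarize(X_demo, threshold=0.5))   # [[0. 0. 1.]
                                             #  [1. 0. 0.]]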
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the object oriented API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
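# --- Illustrative sketch (not part of the original module) -----------------
# Hypothetical check of the equivalence stated in the docstring above: for a
# linear kernel K = X X^T, centering K with KernelCenterer matches computing
# the kernel of the column-centered data directly.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X_demo = rng.randn(6, 3)
    K = X_demo.dot(X_demo.T)

    X_centered = X_demo - X_demo.mean(axis=0)
    K_expected = X_centered.dot(X_centered.T)

    K_centered = KernelCenterer().fit(K).transform(K)
    assert np.allclose(K_centered, K_expected)
    print("centering the kernel == centering the features")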
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We keep only the categorical feature values of X that were seen during
# fit, i.e. values strictly smaller than n_values_, selected via mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=self.dtype
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
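# A minimal usage sketch for the encoder defined above; the integer matrix and
# the unseen test row are illustrative, and only the public estimator API from
# this file is used.
import numpy as np
from sklearn.preprocessing import OneHotEncoder
X_train = np.array([[0, 1, 2],
                    [1, 0, 0],
                    [0, 2, 1],
                    [1, 1, 2]])
enc = OneHotEncoder(handle_unknown='ignore')
X_enc = enc.fit_transform(X_train)   # CSR matrix with one column per category
# n_values_ holds the number of categories per feature (here [2, 3, 3]) and
# feature_indices_ the cumulative column offsets of each one-hot block.
n_categories = enc.n_values_
offsets = enc.feature_indices_
# The unseen category 3 in the last column is silently dropped because
# handle_unknown='ignore'; the default 'error' would raise a ValueError.
X_new = enc.transform(np.array([[1, 2, 3]])).toarray()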
| bsd-3-clause |
spatialaudio/sweep | lin_sweep_kaiser_window_bandlimited_script5/lin_sweep_kaiser_window_bandlimited_script5_1.py | 2 | 2216 | #!/usr/bin/env python3
"""The influence of windowing of lin. bandlimited sweep signals when using a
Kaiser Window by fixing beta (=7) and fade_out (=0).
fstart = 100 Hz
fstop = 5000 Hz
Deconvolution: Unwindowed
"""
import sys
sys.path.append('..')
import measurement_chain
import plotting
import calculation
import generation
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter
import numpy as np
# Parameters of the measuring system
fs = 44100
fstart = 100
fstop = 5000
duration = 1
pad = 4
# Generate excitation signal
excitation = generation.lin_sweep(fstart, fstop, duration, fs)
N = len(excitation)
# Noise in measurement chain
noise_level_db = -30
noise = measurement_chain.additive_noise(noise_level_db)
# FIR-Filter-System
dirac_system = measurement_chain.convolution([1.0])
# Combine system elements
system = measurement_chain.chained(dirac_system, noise)
# Spectrum of dirac for reference
dirac = np.zeros(pad * fs)
dirac[0] = 1
dirac_f = np.fft.rfft(dirac)
# Lists
beta = 7
fade_in_list = np.arange(0, 1001, 1)
fade_out = 0
def get_results(fade_in):
excitation_windowed = excitation * windows.window_kaiser(N,
fade_in,
fade_out,
fs, beta)
excitation_windowed_zeropadded = generation.zero_padding(
excitation_windowed, pad, fs)
excitation_zeropadded = generation.zero_padding(excitation, pad, fs)
system_response = system(excitation_windowed_zeropadded)
ir = calculation.deconv_process(excitation_zeropadded,
system_response,
fs)
return ir
with open("lin_sweep_kaiser_window_bandlimited_script5_1.txt", "w") as f:
for fade_in in fade_in_list:
ir = get_results(fade_in)
pnr = calculation.pnr_db(ir[0], ir[1:4 * fs])
spectrum_distance = calculation.vector_distance(
dirac_f, np.fft.rfft(ir[:pad * fs]))
f.write(
str(fade_in) + " " + str(pnr) +
" " + str(spectrum_distance) + " \n")
| mit |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/scipy/stats/morestats.py | 7 | 78330 | # Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
import numpy as np
from numpy import (isscalar, r_, log, sum, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d, sqrt, ceil,
floor, array, poly1d, compress, not_equal, pi, exp, ravel, angle)
from numpy.testing.decorators import setastest
from scipy.lib.six import string_types
from scipy.lib._numpy_compat import count_nonzero
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
def bayes_mvs(data, alpha=0.90):
"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability `alpha`.
Notes
-----
Each tuple of mean, variance, and standard deviation estimates represent
the (center, (lower, upper)) with center the mean of the conditional pdf
of the value given the data and (lower, upper) is a confidence interval
centered on the median, containing the estimate to a probability
`alpha`.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
Equivalent to tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://hdl.handle.net/1877/438, 2006.
"""
res = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." % alpha)
return tuple((x.mean(), x.interval(alpha)) for x in res)
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
Notes
-----
The return values from bayes_mvs(data) is equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
Examples
--------
>>> from scipy.stats import mvsdist
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if (n < 2):
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if (n > 1000): # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C/n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C/(2.*n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0/n)*C)
else:
nm1 = n-1
fac = n*C/2.
val = nm1/2.
mdist = distributions.t(nm1,loc=xbar,scale=math.sqrt(C/nm1))
sdist = distributions.gengamma(val,-2,scale=math.sqrt(fac))
vdist = distributions.invgamma(val,scale=fac)
return mdist, vdist, sdist
def kstat(data,n=2):
"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic is the unique symmetric unbiased estimator of the nth
cumulant kappa_n.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
Notes
-----
The cumulants are related to central moments but are specifically defined
using a power series expansion of the logarithm of the characteristic
function (which is the Fourier transform of the PDF).
In particular let phi(t) be the characteristic function, then::
ln phi(t) = sum_{n=0}^{inf} kappa_n (it)^n / n!
The first few cumulants (kappa_n) in terms of central moments (mu_n) are::
kappa_1 = mu_1
kappa_2 = mu_2
kappa_3 = mu_3
kappa_4 = mu_4 - 3*mu_2**2
kappa_5 = mu_5 - 10*mu_2 * mu_3
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
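Examples
--------
An illustrative check: for a large sample the first two k-statistics should
be close to the sample mean and the unbiased sample variance:
>>> from scipy import stats
>>> np.random.seed(1234)
>>> data = np.random.normal(size=1000)
>>> k1 = stats.kstat(data, n=1)  # approximately data.mean()
>>> k2 = stats.kstat(data, n=2)  # approximately data.var(ddof=1)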
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = zeros(n+1,'d')
data = ravel(data)
N = len(data)
for k in range(1,n+1):
S[k] = sum(data**k,axis=0)
if n == 1:
return S[1]*1.0/N
elif n == 2:
return (N*S[2]-S[1]**2.0)/(N*(N-1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2]+N*N*S[3]) / (N*(N-1.0)*(N-2.0))
elif n == 4:
return (-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / \
(N*(N-1.0)*(N-2.0)*(N-3.0))
else:
raise ValueError("Should not be here.")
def kstatvar(data,n=2):
"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data,n=2)*1.0/N
elif n == 2:
k2 = kstat(data,n=2)
k4 = kstat(data,n=4)
return (2*k2*k2*N + (N-1)*k4)/(N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(x):
"""See Notes section of `probplot` for details."""
N = len(x)
osm_uniform = np.zeros(N, dtype=np.float64)
osm_uniform[-1] = 0.5**(1.0 / N)
osm_uniform[0] = 1 - osm_uniform[-1]
i = np.arange(2, N)
osm_uniform[1:-1] = (i - 0.3175) / (N + 0.365)
return osm_uniform
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def probplot(x, sparams=(), dist='norm', fit=True, plot=None):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
quantiles = dist.ppf(val), where
    val = 1 - 0.5**(1/n),               for i = 1
    val = (i - 0.3175) / (n + 0.365),   for i = 2, ..., n-1
    val = 0.5**(1/n),                   for i = n
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample/2.,2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
osm_uniform = _calc_uniform_order_statistic_medians(x)
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if fit or (plot is not None):
# perform a linear fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title('Probability Plot')
plot.set_xlabel('Quantiles')
plot.set_ylabel('Ordered Values')
else:
# matplotlib.pyplot module
plot.title('Probability Plot')
plot.xlabel('Quantiles')
plot.ylabel('Ordered Values')
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
# Add R^2 value to the plot as text
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
def ppcc_max(x, brack=(0.0,1.0), dist='tukeylambda'):
"""Returns the shape parameter that maximizes the probability plot
correlation coefficient for the given data to a one-parameter
family of distributions.
See also ppcc_plot
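Examples
--------
A rough sketch with Tukey-lambda variates (the default family); the shape
value -0.7, seed and sample size are arbitrary:
>>> from scipy import stats
>>> np.random.seed(1234567)
>>> x = stats.tukeylambda.rvs(-0.7, size=1000)
>>> shape = stats.ppcc_max(x)  # expected to land near -0.7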
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(x)
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1-r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x,a,b,dist='tukeylambda', plot=None, N=80):
"""Returns (shape, ppcc), and optionally plots shape vs. ppcc
(probability plot correlation coefficient) as a function of shape
parameter for a one-parameter family of distributions from shape
value a to b.
See also ppcc_max
"""
svals = r_[a:b:complex(N)]
ppcc = svals*0.0
k = 0
for sval in svals:
r1,r2 = probplot(x,sval,dist=dist,fit=1)
ppcc[k] = r2[-1]
k += 1
if plot is not None:
plot.plot(svals, ppcc, 'x')
plot.title('(%s) PPCC Plot' % dist)
plot.xlabel('Prob Plot Corr. Coef.')
plot.ylabel('Shape Values')
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=np.float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title('$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
llf = (lmb - 1) * np.sum(np.log(data), axis=0)
llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0))
return llf
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If `alpha` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and `alpha` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given `alpha`.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
y = (x**lmbda - 1) / lmbda,  for lmbda != 0
    log(x),                  for lmbda = 0
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when `alpha` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(x)
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
maxlog = np.zeros(2, dtype=np.float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
Probability Plot Correlelation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
should be used after calling `probplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the correlation coefficient of transformed x
z = boxcox(x, lmbda=val)
_, r2 = probplot(z, dist='norm', fit=True)
ppcc[i] = r2[-1]
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title('Box-Cox Normality Plot')
plot.set_ylabel('Prob Plot Corr. Coef.')
plot.set_xlabel('$\lambda$')
else:
# matplotlib.pyplot module
plot.title('Box-Cox Normality Plot')
plot.ylabel('Prob Plot Corr. Coef.')
plot.xlabel('$\lambda$')
except Exception:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
a : array_like, optional
Array of internal parameters used in the calculation. If these
are not given, they will be computed internally. If x has length
n, then a must have length n/2.
reta : bool, optional
Whether or not to return the internally computed a values. The
default is False.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
a : array_like, optional
If `reta` is True, then these are the internally computed "a"
values that may be passed into this function on future calls.
See Also
--------
anderson : The Anderson-Darling test for normality
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
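Examples
--------
A minimal sketch with synthetic, normally distributed data (seed and size
chosen arbitrarily); the p-value should then typically be large:
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = stats.norm.rvs(loc=5, scale=3, size=100)
>>> W, p = stats.shapiro(x)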
"""
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
if a is None:
a = zeros(N,'f')
init = 0
else:
if len(a) != N//2:
raise ValueError("len(a) must equal len(x)/2")
init = 1
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if ifault not in [0,2]:
warnings.warn(str(ifault))
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
if reta:
return w, pw, a
else:
return w, pw
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of he American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
def anderson(x,dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
dist : {'norm','expon','logistic','gumbel','extreme1'}, optional
the type of distribution to test against. The default is 'norm';
'extreme1' is a synonym for 'gumbel'.
Returns
-------
A2 : float
The Anderson-Darling test statistic
critical : list
The critical values for this distribution
sig : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
to Tests for Exponentiality , Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
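Examples
--------
A rough sketch with synthetic normal data; the statistic is compared against
the tabulated 5% critical value:
>>> from scipy import stats
>>> np.random.seed(1234)
>>> x = stats.norm.rvs(size=200)
>>> A2, critical, sig = stats.anderson(x, dist='norm')
>>> reject_at_5 = A2 > critical[sig == 5][0]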
"""
if dist not in ['norm','expon','gumbel','extreme1','logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y-xbar)/s
z = distributions.norm.cdf(w)
sig = array([15,10,5,2.5,1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N),3)
elif dist == 'expon':
w = y / xbar
z = distributions.expon.cdf(w)
sig = array([15,10,5,2.5,1])
critical = around(_Avals_expon / (1.0 + 0.6/N),3)
elif dist == 'logistic':
def rootfunc(ab,xj,N):
a,b = ab
tmp = (xj-a)/b
tmp2 = exp(tmp)
val = [sum(1.0/(1+tmp2),axis=0)-0.5*N,
sum(tmp*(1.0-tmp2)/(1+tmp2),axis=0)+N]
return array(val)
sol0 = array([xbar,np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc,sol0,args=(x,N),xtol=1e-5)
w = (y-sol[0])/sol[1]
z = distributions.logistic.cdf(w)
sig = array([25,10,5,2.5,1,0.5])
critical = around(_Avals_logistic / (1.0+0.25/N),3)
else: # (dist == 'gumbel') or (dist == 'extreme1'):
# the following is incorrect, see ticket:1097
#def fixedsolve(th,xj,N):
# val = stats.sum(xj)*1.0/N
# tmp = exp(-xj/th)
# term = sum(xj*tmp,axis=0)
# term /= sum(tmp,axis=0)
# return val - term
#s = optimize.fixed_point(fixedsolve, 1.0, args=(x,N),xtol=1e-5)
#xbar = -s*log(sum(exp(-x/s),axis=0)*1.0/N)
xbar, s = distributions.gumbel_l.fit(x)
w = (y-xbar)/s
z = distributions.gumbel_l.cdf(w)
sig = array([25,10,5,2.5,1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)),3)
i = arange(1,N+1)
S = sum((2*i-1.0)/N*(log(z)+log(1-z[::-1])),axis=0)
A2 = -N-S
return A2, critical, sig
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2akN : float
The A2akN statistic of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(np.float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / \
(Bj * (N - Bj) - N * lj / 4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2kN : float
The A2kN statistic of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
A2 : float
Normalized k-sample Anderson-Darling test statistic.
critical : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
p : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
[1]_ Defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.1%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752]),
0.03134990135800783)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The approximate p-value (87%) has to be
computed by extrapolation and may not be very accurate:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]),
0.8789283903979661)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
h = (1. / arange(1, N)).sum()
H = (1. / n).sum()
g = 0
for l in arange(1, N-1):
inner = np.array([1. / ((N - l) * m) for m in arange(l+1, N)])
g += inner.sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
critical = b0 + b1 / math.sqrt(m) + b2 / m
pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
if A2 < critical.min() or A2 > critical.max():
warnings.warn("approximate p-value will be computed by extrapolation")
p = math.exp(np.polyval(pf, A2))
return A2, critical, p
def ansari(x,y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
AB : float
The Ansari-Bradley test statistic
p-value : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
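Examples
--------
An illustrative sketch with two samples that differ only in scale (sizes
large enough that the normal approximation is used):
>>> from scipy import stats
>>> np.random.seed(1234)
>>> x = stats.norm.rvs(scale=1.0, size=60)
>>> y = stats.norm.rvs(scale=3.0, size=60)
>>> AB, p = stats.ansari(x, y)  # small p suggests unequal scales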
"""
x,y = asarray(x),asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m+n
xy = r_[x,y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank,N-rank+1)),0)
AB = sum(symrank[:n],axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and ((m < 55) or (n < 55)):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n,m)
ind = AB-astart
total = sum(a1,axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if (ind == cind):
pval = 2.0*sum(a1[:cind+1],axis=0)/total
else:
pval = 2.0*sum(a1[:cind],axis=0)/total
else:
find = int(floor(ind))
if (ind == floor(ind)):
pval = 2.0*sum(a1[find:],axis=0)/total
else:
pval = 2.0*sum(a1[find+1:],axis=0)/total
return AB, min(1.0,pval)
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n*(N+1.0)**2 / 4.0 / N
varAB = n*m*(N+1.0)*(3+N**2)/(48.0*N**2)
else:
mnAB = n*(N+2.0)/4.0
varAB = m*n*(N+2)*(N-2.0)/48/(N-1.0)
if repeats: # adjust variance estimates
# compute sum(tj * rj**2,axis=0)
fac = sum(symrank**2,axis=0)
if N % 2: # N odd
varAB = m*n*(16*N*fac-(N+1)**4)/(16.0 * N**2 * (N-1))
else: # N even
varAB = m*n*(16*fac-N*(N+2)**2)/(16.0 * N * (N-1))
z = (AB - mnAB)/sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AB, pval
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. May be different lengths.
Returns
-------
T : float
The test statistic.
p-value : float
The p-value of the test.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
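Examples
--------
A small sketch with three synthetic samples, one of larger variance:
>>> from scipy import stats
>>> np.random.seed(42)
>>> a = np.random.normal(10, 1, size=25)
>>> b = np.random.normal(10, 1, size=25)
>>> c = np.random.normal(10, 4, size=25)
>>> T, p = stats.bartlett(a, b, c)  # small p suggests unequal variances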
"""
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k,'d')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = sum(Ni,axis=0)
spsq = sum((Ni-1)*ssq,axis=0)/(1.0*(Ntot-k))
numer = (Ntot*1.0-k)*log(spsq) - sum((Ni-1.0)*log(ssq),axis=0)
denom = 1.0 + (1.0/(3*(k-1)))*((sum(1.0/(Ni-1.0),axis=0))-1.0/(Ntot-k))
T = numer / denom
pval = distributions.chi2.sf(T,k-1) # 1 - cdf
return T, pval
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
* 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
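Examples
--------
A minimal sketch; with the default ``center='median'`` this is the
Brown-Forsythe variant of the test:
>>> from scipy import stats
>>> np.random.seed(12345)
>>> a = np.random.normal(0, 1, size=30)
>>> b = np.random.normal(0, 2, size=30)
>>> c = np.random.normal(0, 1, size=30)
>>> W, p = stats.levene(a, b, c)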
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = sum(Ni, axis=0)
# compute Zij's
Zij = [None]*k
for i in range(k):
Zij[i] = abs(asarray(args[i])-Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i]*Ni[i]
Zbar /= Ntot
numer = (Ntot-k) * sum(Ni*(Zbari-Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += sum((Zij[i]-Zbari[i])**2, axis=0)
denom = (k-1.0)*dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return W, pval
@setastest(False)
def binom_test(x, n=None, p=0.5):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
number of successes and failures
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] http://en.wikipedia.org/wiki/Binomial_test
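Examples
--------
A quick sketch: the two-sided p-value for 7 successes in 20 trials under a
fair-coin null hypothesis:
>>> from scipy import stats
>>> p = stats.binom_test(7, n=20, p=0.5)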
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1]+x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
d = distributions.binom.pmf(x, n, p)
rerr = 1+1e-7
if (x == p*n):
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif (x < p*n):
i = np.arange(np.ceil(p*n), n+1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(x, n, p) +
distributions.binom.sf(n-y, n, p))
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(y-1, n, p) +
distributions.binom.sf(x-1, n, p))
return min(1.0, pval)
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g)-1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
def fligner(*args, **kwds):
"""
Perform Fligner's test for equal variances.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner's test is
non-parametric in contrast to Bartlett's test `bartlett` and
Levene's test `levene`.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
Xsq : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
References
----------
.. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
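Examples
--------
A minimal sketch with two synthetic samples of different spread:
>>> from scipy import stats
>>> np.random.seed(54321)
>>> a = np.random.normal(0, 1, size=40)
>>> b = np.random.normal(0, 3, size=40)
>>> Xsq, p = stats.fligner(a, b)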
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center not in ['mean','median','trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
+ "or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks/(2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a, g, sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a, axis=0, ddof=1)
Xsq = sum(Ni*(asarray(Aibar) - anbar)**2.0, axis=0)/varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return Xsq, pval
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
axis: int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
p-value : scalar or ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.84332354, -5.6840814 ]), array([5.11694980e-09, 1.31517628e-08]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = sum((Ri - (N + 1.0) / 2) ** 2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
The first set of measurements.
y : array_like, optional
The second set of measurements. If `y` is not given, then the `x`
array is considered to be the differences between the two sets of
measurements.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt":
Pratt treatment: includes zero-differences in the ranking process
(more conservative)
"wilcox":
Wilcox treatment: discards all zero-differences
"zsplit":
            Zero rank split: just like Pratt, but splitting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
Returns
-------
T : float
The sum of the ranks of the differences above or below zero, whichever
is smaller.
p-value : float
The two-sided p-value for the test.
Notes
-----
Because the normal approximation is used for the calculations, the
samples used should be large. A typical rule is to require that
n > 20.
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' \
or 'pratt' or 'zsplit'")
if y is None:
d = x
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x-y
if zero_method == "wilcox":
d = compress(not_equal(d, 0), d, axis=-1) # Keep all non-zero differences
count = len(d)
if (count < 10):
warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = sum((d > 0) * r, axis=0)
r_minus = sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
T = min(r_plus, r_minus)
mn = count*(count + 1.) * 0.25
se = count*(count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
prob = 2. * distributions.norm.sf(abs(z))
return T, prob
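# Illustrative sketch (added comment, not part of the original module): a minimal
# use of `wilcoxon` on hypothetical paired measurements.
#
#     before = [125, 115, 130, 140, 140, 115, 140, 125, 140, 135]
#     after = [110, 122, 125, 120, 140, 124, 123, 137, 135, 145]
#     T, p = wilcoxon(before, after)  # T: smaller rank sum, p: two-sided p-value
#
# With this few pairs the small-sample warning above will fire, since the
# p-value relies on the normal approximation.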
@setastest(False)
def median_test(*args, **kwds):
"""
Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(args)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional.
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
Returns
-------
stat : float
The test statistic. The statistic that is returned is determined by
`lambda_`. The default is Pearson's chi-squared statistic.
p : float
The p-value of the test.
m : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> stat, p, med, tbl = median_test(g1, g2, g3)
The median is
>>> med
34.0
and the contingency table is
>>> tbl
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> p
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> p
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
>>> p
0.063873276069553273
>>> tbl
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
"""
ties = kwds.pop('ties', 'below')
correction = kwds.pop('correction', True)
lambda_ = kwds.pop('lambda_', None)
if len(kwds) > 0:
        bad_kwd = list(kwds.keys())[0]
raise TypeError("median_test() got an unexpected keyword "
"argument %r" % bad_kwd)
if len(args) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
"of: %s" % (ties, str(ties_options)[1:-1]))
data = [np.asarray(arg) for arg in args]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
grand_median = np.median(np.concatenate(data))
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.where((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None]*N
plist[0] = poly1d(1)
for n in range(1,N):
plist[n] = plist[n-1].deriv() - poly1d([1,0])*plist[n-1]
return plist
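# Illustrative note (added, not in the original source): the first few
# polynomials produced by the recursion p_{n+1} = p_n' - x*p_n with p_0 = 1 are
#
#     p0(x) = 1
#     p1(x) = -x
#     p2(x) = x**2 - 1
#     p3(x) = -x**3 + 3*x
#
# i.e. the probabilists' Hermite polynomials up to an alternating sign.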
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
if g4 is None:
g4 = 3*g2*g2
sigsq = 1.0/g2
sig = sqrt(sigsq)
mu = g1*sig**3.0
p12 = _hermnorm(13)
for k in range(13):
p12[k] = p12[k]/sig**k
# Add all of the terms to polynomial
totp = p12[0] - (g1/6.0*p12[3]) + \
(g2/24.0*p12[4] + g1*g1/72.0*p12[6]) - \
(g3/120.0*p12[5] + g1*g2/144.0*p12[7] + g1**3.0/1296.0*p12[9]) + \
(g4/720*p12[6] + (g2*g2/1152.0+g1*g3/720)*p12[8] +
g1*g1*g2/1728.0*p12[10] + g1**4.0/31104.0*p12[12])
# Final normalization
totp = totp / sqrt(2*pi)/sig
def thefunc(x):
xn = (x-mu)/sig
return totp(xn)*exp(-xn*xn/2.0)
return thefunc
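# Illustrative sketch (hypothetical usage, not in the original source): assuming
# g1 and g2 play the role of skewness- and kurtosis-like shape parameters,
# pdf_fromgamma returns a callable density built from a Hermite-polynomial
# (Gram-Charlier style) expansion around a Gaussian.
#
#     approx_pdf = pdf_fromgamma(g1=0.1, g2=1.0)
#     values = approx_pdf(np.linspace(-3, 3, 7))  # evaluate the approximate pdf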
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high-low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = angle(np.mean(exp(1j*ang), axis=axis))
mask = res < 0
if (mask.ndim > 0):
res[mask] += 2*pi
elif mask:
res = res + 2*pi
return res*(high-low)/2.0/pi + low
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j*ang), axis=axis)
R = abs(res)
return ((high-low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
"""
samples, ang = _circfuncs_common(samples, high, low)
res = np.mean(exp(1j*ang), axis=axis)
R = abs(res)
return ((high-low)/2.0/pi) * sqrt(-2*log(R))
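# Illustrative sketch (added comment, not in the original source): circular
# statistics on angles given in degrees, using `high`/`low` to set the range.
#
#     angles = np.array([355.0, 5.0, 15.0])
#     circmean(angles, high=360)  # close to 5 degrees, not the arithmetic 125
#     circstd(angles, high=360)   # small, since the angles are tightly clustered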
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
| apache-2.0 |
CVML/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
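def test_hasher_single_string_feature():
    # Added illustrative check (not in the original test file): with signed
    # hashing (the default), a single string feature hashes to exactly one
    # bucket whose absolute value is 1.
    h = FeatureHasher(n_features=16, input_type="string")
    X = h.transform([["spam"]])
    assert_equal(X.nnz, 1)
    assert_array_equal(np.abs(X.data), [1.0])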
| bsd-3-clause |
TheCentralLimit/TopSecret | src/main.py | 1 | 3926 | #!/usr/bin/env python2
from __future__ import division, print_function
from astroML.decorators import pickle_results
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import scipy
from os import path
from sys import argv
from classifier import classifier
from fit import power_law
import gw
from density import chirp_mass_distribution
from utils import make_sure_path_exists
from chis_code import chis_code
def main(data_filename, output_directory, *features):
# Set random seed.
np.random.seed(1)
# Create output directory if it does not exist.
make_sure_path_exists(output_directory)
# Load data from file.
m_1, m_2, s, rho = np.loadtxt(data_filename, unpack=True)
s = s.astype(bool)
# Compute standard quantitites.
eta = gw.symmetric_mass_ratio(m_1, m_2)
M_c = gw.chirp_mass(m_1, m_2)
x_err = gw.chirp_mass_log_error(M_c, rho)
q = gw.mass_ratio(m_1, m_2)
q_err = gw.mass_ratio_error(M_c, rho)
D = gw.detectable_distance(M_c)
V = (4/3) * np.pi * D**3
T = 0.6
# Transform M_c into log-space.
x = np.log10(M_c)
# Compute the weights for each M_c.
w = 1 / (V*T)
# Generate `n_smooth` evenly-spaced values of log(M_c) for visualization
# purposes.
x_smooth = np.linspace(np.min(x), np.max(x), num=1000)
M_c_smooth = 10**x_smooth
# Create Figure.
fig_density = plt.figure()
# Set layout of Figure such that there are 3 vertically stacked subplots,
# with the bottom one being 1/5 the size of the other two.
gs = mpl.gridspec.GridSpec(2, 1, height_ratios=[5,1])
# Create subplot axes, with shared x-axes.
ax_pdf = fig_density.add_subplot(gs[0])
ax_data = fig_density.add_subplot(gs[1], sharex=ax_pdf)
# Set axis labels.
ax_data.set_xlabel(r"$\mathcal{M}_c\ [M_\odot]$")
ax_pdf.set_ylabel(r"$r(\mathcal{M}_c)$")
# Hide unwanted axis ticks and tick labels.
plt.setp(ax_pdf.xaxis.get_ticklabels(), visible=False)
ax_data.yaxis.set_ticks([])
ax_pdf.semilogx()
ax_data.semilogx()
# Create log-scale Figure.
fig_log_density = plt.figure()
# Set layout of Figure such that there are 3 vertically stacked subplots,
# with the bottom one being 1/5 the size of the other two.
gs = mpl.gridspec.GridSpec(2, 1, height_ratios=[5,1])
# Create subplot axes, with shared x-axes.
ax_log_pdf = fig_log_density.add_subplot(gs[0])
ax_log_data = fig_log_density.add_subplot(gs[1], sharex=ax_log_pdf)
# Set axis labels.
ax_log_data.set_xlabel(r"$\mathcal{M}_c\ [M_\odot]$")
ax_log_pdf.set_ylabel(r"$r(\mathcal{M}_c)$")
# Hide unwanted axis ticks and tick labels.
plt.setp(ax_log_pdf.xaxis.get_ticklabels(), visible=False)
ax_log_data.yaxis.set_ticks([])
ax_log_pdf.loglog()
ax_log_data.semilogx()
r_fn, r_err_fn = chirp_mass_distribution(M_c, M_c_smooth, x, x_smooth, w, s,
ax_pdf, ax_data,
ax_log_pdf, ax_log_data)
if ("power_law" in features) or ("all" in features):
power_law(r_fn, r_err_fn, M_c, M_c_smooth, x, x_smooth,
ax_pdf, ax_data, ax_log_pdf, ax_log_data)
if ("mcmc" in features) or ("all" in features):
lam_mcmc = chis_code(np.log10(M_c),r_fn(np.log10(M_c)),r_err_fn(np.log10(M_c)),output_directory) # (x,y,yerr)
if ("classifier" in features) or ("all" in features):
classifier(m_1, m_2, M_c, s,
ax_pdf, ax_data, ax_log_pdf, ax_log_data,
output_directory)
ax_pdf.legend()
fig_density.savefig(path.join(output_directory,
"chirp-mass-distribution.pdf"))
fig_log_density.savefig(path.join(output_directory,
"chirp-mass-log-distribution.pdf"))
if __name__ == "__main__":
main(*argv[1:])
| mit |
ddcampayo/ddcampayo.github.io | cursos_previos/Curso_CFD_OS/Exploring-OpenFOAM-master/laminarVortexShedding/strouhal.py | 3 | 1531 | #!/usr/bin/python
# Comflics: Exploring OpenFOAM
# Compute Strouhal Number of Laminar Vortex Shedding
# S. Huq, 13MAY17
#
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
# # Read Results
data = np.loadtxt('./postProcessing/forceCoeffs/0/forceCoeffs.dat', skiprows=0)
L = 2 # characteristic length, L = D (cylinder diameter)
V = 1 # Velocity
time = data[:,0]
Cd = data[:,2]
Cl = data[:,3]
del data
# # Compute FFT
N = len(time)
dt = time[2] - time[1]
# # inaccurate FFT
# freq = np.fft.fftfreq(N, dt)
# Cd_fft = np.fft.fft(Cd)
# Cl_amp = np.fft.fft(Cl)
# plt.plot(freq, Cl_amp) # Figure 2.10
# plt.show()
# # Better stable FFT
nmax=512 # no. of points in the fft
# freq, Cd_amp = signal.welch(Cd, 1./dt, nperseg=nmax)
freq, Cl_amp = signal.welch(Cl, 1./dt, nperseg=nmax)
plt.plot(freq, Cl_amp) # Figure 2.10
plt.show()
# # Strouhal Number
# Find the index corresponding to max amplitude
Cl_max_fft_idx = np.argmax(abs(Cl_amp))
freq_shed = freq[Cl_max_fft_idx ]
St = freq_shed * L / V
print "Vortex shedding freq: %.3f [Hz]" % (freq_shed)
print "Strouhal Number: %.3f" % (St)
# # Explore Results
# #
# # Figure 2.8
# # Check that there are at least 10 cycles of oscillation;
# # more cycles improve the accuracy.
# plt.plot(time,Cl)
# plt.show()
# # Figure 2.9
# plt.plot(time,Cd)
# plt.show()
# #
# # Exercise
# # Exclude data before onset of the oscillations.
# # approx time = 200 s.
# # Hint: skiprows = 800 - 950
| gpl-3.0 |
Mazecreator/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions.py | 3 | 18978 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _fill_array(arr, seq, fillvalue=0):
"""
Recursively fills padded arr with elements from seq.
  If the length of seq is less than the padded length of arr, fillvalue is used.
Args:
arr: Padded tensor of shape [batch_size, ..., max_padded_dim_len].
    seq: Non-padded list of data samples of shape
[batch_size, ..., padded_dim(None)]
fillvalue: Default fillvalue to use.
"""
if arr.ndim == 1:
try:
len_ = len(seq)
except TypeError:
len_ = 0
arr[:len_] = seq
arr[len_:] = fillvalue
else:
for subarr, subseq in six.moves.zip_longest(arr, seq, fillvalue=()):
_fill_array(subarr, subseq, fillvalue)
def _pad_if_needed(batch_key_item, fillvalue=0):
""" Returns padded batch.
Args:
batch_key_item: List of data samples of any type with shape
[batch_size, ..., padded_dim(None)].
fillvalue: Default fillvalue to use.
Returns:
Padded with zeros tensor of same type and shape
[batch_size, ..., max_padded_dim_len].
Raises:
ValueError if data samples have different shapes (except last padded dim).
"""
shapes = [seq.shape[:-1] if len(seq.shape) > 0 else -1
for seq in batch_key_item]
if not all(shapes[0] == x for x in shapes):
raise ValueError("Array shapes must match.")
last_length = [seq.shape[-1] if len(seq.shape) > 0 else 0
for seq in batch_key_item]
if all([x == last_length[0] for x in last_length]):
return batch_key_item
batch_size = len(batch_key_item)
max_sequence_length = max(last_length)
result_batch = np.zeros(
shape=[batch_size] + list(shapes[0]) + [max_sequence_length],
dtype=batch_key_item[0].dtype)
_fill_array(result_batch, batch_key_item, fillvalue)
return result_batch
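# Illustrative sketch (added comment, not in the original source): padding two
# 1-D samples of different lengths to a common shape with the default fill of 0.
#
#     _pad_if_needed([np.array([1.0, 2.0]), np.array([3.0, 4.0, 5.0])])
#     # -> array([[1., 2., 0.],
#     #           [3., 4., 5.]])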
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
current_epoch: Integer, the epoch number has been emitted.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
# Now we might have emitted more data for expected epochs. Need to trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
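# Illustrative sketch (added comment, not in the original source): with an array
# of length 10 whose epoch ends at index 9, requesting a batch of 4 starting at
# index 8 while allowing only one epoch trims the batch at the epoch boundary.
#
#     _get_integer_indices_for_next_batch(
#         batch_indices_start=8, batch_size=4, epoch_end=9, array_length=10,
#         current_epoch=0, total_epochs=1)
#     # -> ([8, 9], 1)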
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None,
pad_value=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
self._pad_value = pad_value
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
raise KeyError("key mismatch between dicts emitted by GenFun "
"Expected {} keys; got {}".format(
self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index],
list()).append(data_row[key])
list_dict_size += 1
if self._pad_value is not None:
feed_dict = {key: np.asarray(_pad_if_needed(item, self._pad_value))
for key, item in list(list_dict.items())}
else:
feed_dict = {key: np.asarray(item)
for key, item in list(list_dict.items())}
return feed_dict
def _enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None,
pad_value=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given (`OrderedDict` of) array
or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
`Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
numpy arrays, the first enqueued `Tensor` contains the row number.
Args:
data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
into the queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
pad_value: default value for dynamic padding of data samples, if provided.
Returns:
A queue filled with the rows of the given (`OrderedDict` of) array or
`DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
arrays, a numpy `ndarray`, or a generator producing these.
NotImplementedError: padding and shuffling data at the same time.
NotImplementedError: padding usage with non generator data type.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [
dtypes.as_dtype(col.dtype) for col in data.values()
]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif isinstance(data, tp.FunctionType):
x_first_el = six.next(data())
x_first_keys = sorted(x_first_el.keys())
x_first_values = [x_first_el[key] for key in x_first_keys]
types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
queue_shapes = [col.shape for col in x_first_values]
get_feed_fn = _GeneratorFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [
dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
pad_data = pad_value is not None
if pad_data and get_feed_fn is not _GeneratorFeedFn:
raise NotImplementedError(
"padding is only available with generator usage")
if shuffle and pad_data:
raise NotImplementedError(
"padding and shuffling data at the same time is not implemented")
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(
capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
elif pad_data:
min_after_dequeue = 0 # just for the summary text
queue_shapes = list(map(
lambda x: tuple(list(x[:-1]) + [None]) if len(x) > 0 else x,
queue_shapes))
queue = data_flow_ops.PaddingFIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
if not pad_data:
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
else:
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs,
pad_value=pad_value))
runner = fqr._FeedingQueueRunner( # pylint: disable=protected-access
queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
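# Illustrative sketch (hypothetical usage, added comment): enqueuing the rows of
# a small numpy array; once queue runners are started inside a session, each
# dequeue yields a (row index, row) pair.
#
#     arr = np.arange(12.0).reshape(4, 3)
#     queue = _enqueue_data(arr, capacity=16, shuffle=False, num_epochs=1)
#     index_t, row_t = queue.dequeue_many(2)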
| apache-2.0 |
Thomsen22/MissingMoney | Day Ahead Market - 24 Bus/optimization.py | 1 | 10171 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 8 13:54:04 2016
@author: Søren
"""
# Standard library and third-party modules
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
import seaborn as sns
from itertools import cycle, islice
# Own modules
from dayahead_optclass import DayAhead
def optimization():
market = DayAhead()
market.optimize()
times = market.data.times
zones = market.data.zones
generators = market.data.generators
lines = market.data.lines
consumption = market.data.consumption
network = market.data.network
df_zonalconsumption = market.data.df_zonalconsumption
# Zonal prices, found by taking the dual of the powerbalance constraint
df_price = pd.DataFrame(index = times, data = {z: [market.constraints.powerbalance[z,t].pi for t in times] for z in zones})
# Generator production,
df_genprod = pd.DataFrame(index = times, data = {g: [market.variables.gprod[g,t].x for t in times] for g in generators.index})
# Line flow, from node -> to node
df_lineflow = pd.DataFrame(index = times, data = {l: [market.variables.linelimit[l,t].x for t in times] for l in lines})
# Loadshedding in the system
df_loadshed = pd.DataFrame(index = times, data = {z: [market.variables.loadshed[z,t].x for t in times] for z in zones})
# Wind production
df_windprod = pd.DataFrame(index = times, data = {z: [market.variables.windprod[z,t].x for t in times] for z in zones})
windproduction = df_windprod.sum(axis=1)
# Solar production
df_solarprod = pd.DataFrame(index = times, data = {z: [market.variables.solarprod[z,t].x for t in times] for z in zones})
solarproduction = df_solarprod.sum(axis=1)
# Total consumption
total_consumption = consumption.set_index(np.arange(0,len(consumption)))
total_consumption = (total_consumption.sum(axis=1)) - (df_loadshed.sum(axis=1))
# Calculating the wind penetration level
wind_penetration = (windproduction / total_consumption) * 100
solar_penetration = (solarproduction / total_consumption) * 100
df_windsolarload = pd.DataFrame({'Time': df_windprod.index, 'WindProduction[MW]': windproduction.values, 'SolarProduction[MW]': solarproduction.values,\
'TotalConsumption[MW]': total_consumption.values, 'WindPenetration[%]': wind_penetration.values, 'SolarPenetration[%]': solar_penetration.values}).set_index('Time')
# Assigning each zone to a generator
zone_generator = generators[['name','country']].values.tolist()
zone_for_gens = defaultdict(list)
for generator, zone in zone_generator:
zone_for_gens[generator].append(zone)
# Creating a dictionary to contain the market prices
dict_price = {}
for t in times:
for z in np.arange(len(zones)):
dict_price[df_price.columns[z], t] = df_price.ix[df_price.index[t], df_price.columns[z]]
# Creating a dictionary to contain the generator production
dict_genprod = {}
for t in times:
for g in np.arange(len(generators.index)):
dict_genprod[df_genprod.columns[g], t] = df_genprod.ix[df_genprod.index[t], df_genprod.columns[g]]
# Calculating the revenue for each generator
dict_revenue = {}
for t in times:
for g in generators.index:
for z in zone_for_gens[g]:
dict_revenue[g, t] = dict_price[z, t] * dict_genprod[g, t]
# Summing the revenue for all hours
dict_revenue_total = {}
for g in generators.index:
dict_revenue_total[g] = sum(dict_revenue[g, t] for t in times)
df_revenueprod = pd.DataFrame([[key,value] for key,value in dict_revenue_total.items()],columns=["Generator","Total Revenue"]).set_index('Generator')
df_revenueprod['Total Production'] = df_genprod.sum(axis=0)
    # Detecting start-up events for each generator and hour
dict_startup_number = {}
for t in times[1:]:
for g in generators.index:
dict_startup_number[g, t] = 0
if(dict_genprod[g, t] > 0 and dict_genprod[g, t-1] == 0):
dict_startup_number[g, t] = 1
# Calculating total number of start-ups for all generators
dict_startup_number_total = {}
for g in generators.index:
dict_startup_number_total[g] = sum(dict_startup_number[g,t] for t in times[1:])
startup_number_df = pd.DataFrame([[key,value] for key,value in dict_startup_number_total.items()],columns=["Generator","Total Start-Ups"]).set_index('Generator')
    # Grouping generators by primary fuel
hydro_gens = [g for g in generators.index if generators.primaryfuel[g] == 'Hydro']
coal_gens = [g for g in generators.index if generators.primaryfuel[g] == 'Coal']
gas_gens = [g for g in generators.index if generators.primaryfuel[g] == 'Gas']
nuclear_gens = [g for g in generators.index if generators.primaryfuel[g] == 'Nuclear']
oil_gens = [g for g in generators.index if generators.primaryfuel[g] == 'Oil']
# Calculating total hourly production for all technologies
hydroP = {}
for g in hydro_gens:
hydroP[g] = df_genprod[g]
totalhydroP = {}
for t in times:
totalhydroP[t] = sum(hydroP[g][t] for g in hydro_gens)
coalP = {}
for g in coal_gens:
coalP[g] = df_genprod[g]
totalcoalP = {}
for t in times:
totalcoalP[t] = sum(coalP[g][t] for g in coal_gens)
gasP = {}
for g in gas_gens:
gasP[g] = df_genprod[g]
totalgasP = {}
for t in times:
totalgasP[t] = sum(gasP[g][t] for g in gas_gens)
nuclearP = {}
for g in nuclear_gens:
nuclearP[g] = df_genprod[g]
totalnuclearP = {}
for t in times:
totalnuclearP[t] = sum(nuclearP[g][t] for g in nuclear_gens)
oilP = {}
for g in oil_gens:
oilP[g] = df_genprod[g]
totaloilP = {}
for t in times:
totaloilP[t] = sum(oilP[g][t] for g in oil_gens)
    # Collecting the production of each technology into dataframes and merging them
Oil_df = pd.DataFrame([[key,value] for key,value in totaloilP.items()],columns=["Times", "Oil Production"])#.set_index('Times')
Nuclear_df = pd.DataFrame([[key,value] for key,value in totalnuclearP.items()],columns=["Times", "Nuclear Production"])#.set_index('Times')
Gas_df = pd.DataFrame([[key,value] for key,value in totalgasP.items()],columns=["Times", "Gas Production"])#.set_index('Times')
Coal_df = pd.DataFrame([[key,value] for key,value in totalcoalP.items()],columns=["Times", "Coal Production"])#.set_index('Times')
Hydro_df = pd.DataFrame([[key,value] for key,value in totalhydroP.items()],columns=["Times", "Hydro Production"])#.set_index('Times')
Wind_df = pd.DataFrame(df_windsolarload['WindProduction[MW]'])
Wind_df.rename(columns={'WindProduction[MW]': 'Wind Production'}, inplace=True)
Wind_df['Times'] = times
Solar_df = pd.DataFrame(df_windsolarload['SolarProduction[MW]'])
Solar_df.rename(columns={'SolarProduction[MW]': 'Solar Production'}, inplace=True)
Solar_df['Times'] = times
df_prodtype = pd.DataFrame(Wind_df.merge(Solar_df,on='Times').merge(Hydro_df,on='Times')\
.merge(Nuclear_df,on='Times').merge(Coal_df,on='Times').merge(Gas_df,on='Times').merge(Oil_df,on='Times').set_index('Times'))
# Plots the production as an area diagram
plt.figure(1)
my_colors = list(islice(cycle([sns.xkcd_rgb["windows blue"], sns.xkcd_rgb["yellow"], sns.xkcd_rgb["pale red"]\
, sns.xkcd_rgb["medium green"], sns.xkcd_rgb["amber"], sns.xkcd_rgb["deep purple"],'grey']), None, len(df_prodtype)))
ax = df_prodtype.plot(kind='area', color=my_colors)
ax.set_ylabel('$MW$')
ax.set_xlabel('$Hours$')
ax.set_axis_bgcolor('whitesmoke')
ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.15),
ncol=3, fancybox=True, shadow=True)
# plt.savefig('productionareaplot.pdf')
plt.show()
return df_price, df_genprod, df_lineflow, df_loadshed, df_windsolarload, df_revenueprod, df_prodtype, network, times, generators, startup_number_df, df_zonalconsumption, df_windprod, df_solarprod
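# Illustrative note (added comment, not in the original script): the revenue
# bookkeeping above reduces to, for every generator g located in zone z,
#
#     revenue[g, t] = zonal_price[z, t] * production[g, t]
#     total_revenue[g] = sum over t of revenue[g, t]
#
# i.e. each generator is paid the day-ahead zonal price for every MWh produced.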
def marketoptimization():
df_price, df_genprod, df_lineflow, df_loadshed, df_windsolarload, df_revenueprod, df_prodtype, network, times, generators, startup_number_df, df_zonalconsumption, df_windprod, df_solarprod = optimization()
wind_penetration = df_windsolarload['WindPenetration[%]'].mean(axis=0)
solar_penetration = df_windsolarload['SolarPenetration[%]'].mean(axis=0)
dict_windcost = {}
for z in df_price.columns:
for t in df_price.index:
dict_windcost[z,t] = df_windprod[z][t] * df_price[z][t]
totalwindcost = sum(dict_windcost.values())
dict_solarcost = {}
for z in df_price.columns:
for t in df_price.index:
dict_solarcost[z,t] = df_solarprod[z][t] * df_price[z][t]
totalsolarcost = sum(dict_solarcost.values())
# A dataframe is returned to Excel as a csv file for further work
gen_dataframe = df_revenueprod
gen_dataframe['Total Revenue'] = gen_dataframe['Total Revenue'].map('{:.2f}'.format)
gen_dataframe['Total Production'] = gen_dataframe['Total Production'].map('{:.2f}'.format)
    gen_dataframe["Number of S/U's"] = startup_number_df['Total Start-Ups']
gen_dataframe['Capacity'] = generators.capacity
gen_dataframe['Marginal Cost'] = generators.lincost
gen_dataframe['S/U cost'] = generators.cyclecost
gen_dataframe['Fixed O&M Cost'] = generators.fixedomcost
gen_dataframe['Var O&M Cost'] = generators.varomcost
gen_dataframe['Levelized Capital Cost'] = generators.levcapcost
gen_dataframe['Primary Fuel'] = generators.primaryfuel
gen_dataframe.to_csv('revenue_cost_gen.csv')
return gen_dataframe, wind_penetration, solar_penetration, totalwindcost, totalsolarcost | gpl-3.0 |
sallum/driver-telematics | RegressionDriver.py | 1 | 2127 | import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from random import sample
class RegressionDriver(object):
"""Class for Regression-based analysis of Driver traces"""
def __init__(self, driver, datadict):
"""Initialize by providing a (positive) driver example and a dictionary of (negative) driver references."""
self.driver = driver
self.numfeatures = self.driver.num_features
featurelist = []
self.__indexlist = []
for trace in self.driver.traces:
self.__indexlist.append(trace.identifier)
featurelist.append(trace.features)
# Initialize train and test np arrays
self.__traindata = np.asarray(featurelist)
self.__testdata = np.asarray(featurelist)
self.__trainlabels = np.ones((self.__traindata.shape[0],))
data = np.empty((0, driver.num_features), float)
setkeys = datadict.keys()
if driver.identifier in setkeys:
setkeys.remove(driver.identifier)
else:
setkeys = sample(setkeys, len(setkeys) - 1)
for key in setkeys:
if key != driver.identifier:
data = np.append(data, np.asarray(datadict[key]), axis=0)
self.__traindata = np.append(self.__traindata, data, axis=0)
self.__trainlabels = np.append(self.__trainlabels, np.zeros((data.shape[0],)), axis=0)
self.__y = np.ones((self.__testdata.shape[0],))
def classify(self):
"""Perform classification"""
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4)
clf.fit(self.__traindata, self.__trainlabels)
self.__y = clf.predict(self.__testdata)
def toKaggle(self):
"""Return string in Kaggle submission format"""
returnstring = ""
for i in xrange(len(self.__indexlist) - 1):
returnstring += "%d_%d,%.3f\n" % (self.driver.identifier, self.__indexlist[i], self.__y[i])
returnstring += "%d_%d,%.3f" % (self.driver.identifier, self.__indexlist[len(self.__indexlist)-1], self.__y[len(self.__indexlist)-1])
return returnstring
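# Illustrative sketch (hypothetical usage, added comment): `driver` is assumed to
# expose `identifier`, `num_features` and `traces` (each trace with `identifier`
# and `features`), matching the constructor above; `reference_drivers` is a dict
# of negative examples keyed by driver identifier.
#
#     rd = RegressionDriver(driver, reference_drivers)
#     rd.classify()
#     print(rd.toKaggle())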
| bsd-2-clause |
davidgbe/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
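# Illustrative sketch (added comment, not in the original tests): on a tiny
# 3-node graph the reference implementation routes 0 -> 2 through node 1
# (cost 1 + 1 = 2) instead of taking the direct edge of weight 5.
#
#     g = np.array([[0., 1., 5.],
#                   [1., 0., 1.],
#                   [5., 1., 0.]])
#     floyd_warshall_slow(g.copy())[0, 2]  # -> 2.0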
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
murali-munna/scikit-learn | sklearn/qda.py | 140 | 7682 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
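        # For each class k this evaluates, up to an additive constant, the log
        # posterior
        #   -0.5 * [(x - mu_k)^T Sigma_k^{-1} (x - mu_k) + log|Sigma_k|] + log(prior_k)
        # using the stored rotations/scalings instead of inverting Sigma_k directly.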
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
dendisuhubdy/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 43 | 3572 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers."""
# Create three fully connected layers respectively of size 10, 20, and 10.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op with exponentially decaying learning rate.
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
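    # With these arguments (staircase behaviour off, the default) the decayed
    # rate is 0.1 * 0.001 ** (global_step / 100.0), i.e. the learning rate
    # shrinks by a factor of 1000 every 100 training steps.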
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
e-koch/pyspeckit | pyspeckit/spectrum/models/formaldehyde.py | 1 | 23649 | """
===========================
Formaldehyde cm-line fitter
===========================
This is a formaldehyde 1_11-1_10 / 2_12-2_11 fitter. It includes hyperfine
components of the formaldehyde lines and has both LTE and RADEX LVG based
models
Module API
^^^^^^^^^^
"""
from __future__ import print_function
import numpy as np
from ...mpfit import mpfit
from .. import units
from . import fitter,model,modelgrid
import matplotlib.cbook as mpcb
import copy
from . import hyperfine
from ...specwarnings import warn
from astropy.extern.six.moves import xrange
try: # for model grid reading
import astropy.io.fits as pyfits
except ImportError:
import pyfits
try:
import scipy.interpolate
import scipy.ndimage
scipyOK = True
except ImportError:
scipyOK=False
line_names = ['oneone','twotwo','threethree']
line_names = ['oneone_f10', 'oneone_f01', 'oneone_f22', 'oneone_f21',
'oneone_f12', 'oneone_f11', 'twotwo_f11', 'twotwo_f12',
'twotwo_f21', 'twotwo_f32', 'twotwo_f33', 'twotwo_f22',
'twotwo_f23']
# http://adsabs.harvard.edu/abs/1971ApJ...169..429T has the most accurate freqs
# http://adsabs.harvard.edu/abs/1972ApJ...174..463T [twotwo]
central_freq_dict = {
'oneone': 4.82965996e9,
'twotwo': 14.48847881e9,
'threethree': 28.97480e9,
}
line_strength_dict={
'oneone_f10': 4.,
'oneone_f01': 4.,
'oneone_f22': 15.,
'oneone_f21': 5.,
'oneone_f12': 5.,
'oneone_f11': 3.,
'twotwo_f11': 15.,
'twotwo_f12': 5.,
'twotwo_f21': 5.,
'twotwo_f32': 5.19,
'twotwo_f33': 41.48,
'twotwo_f22': 23.15,
'twotwo_f23': 5.19,
'threethree_f22':1,
'threethree_f44':1,
'threethree_f33':1,
}
relative_strength_total_degeneracy={
'oneone_f10': 36.,
'oneone_f01': 36.,
'oneone_f22': 36.,
'oneone_f21': 36.,
'oneone_f12': 36.,
'oneone_f11': 36.,
'twotwo_f11': 100.01,
'twotwo_f12': 100.01,
'twotwo_f21': 100.01,
'twotwo_f32': 100.01,
'twotwo_f33': 100.01,
'twotwo_f22': 100.01,
'twotwo_f23': 100.01,
'threethree_f22':3.0,
'threethree_f44':3.0,
'threethree_f33':3.0,
}
hf_freq_dict={
'oneone_f10':4.82965996e9 - 18.53e3,
'oneone_f01':4.82965996e9 - 1.34e3,
'oneone_f22':4.82965996e9 - 0.35e3,
'oneone_f21':4.82965996e9 + 4.05e3,
'oneone_f12':4.82965996e9 + 6.48e3,
'oneone_f11':4.82965996e9 + 11.08e3,
'twotwo_f11':14.48847881e9 - 19.97e3,
'twotwo_f12':14.48847881e9 - 7.03e3,
'twotwo_f21':14.48847881e9 - 2.20e3,
'twotwo_f32':14.48847881e9 + 0.12e3,
'twotwo_f33':14.48847881e9 + 0.89e3,
'twotwo_f22':14.48847881e9 + 10.74e3,
'twotwo_f23':14.48847881e9 + 11.51e3,
'threethree_f22':28.97478e9,
'threethree_f44':28.97480e9,
'threethree_f33':28.97481e9,
}
freq_dict = copy.copy(hf_freq_dict)
freq_dict.update(central_freq_dict)
aval_dict = {
'oneone': 10**-8.44801, #64*!pi**4/(3*h*c**3)*nu11**3*mu0**2*(1/2.)
'twotwo': 10**-7.49373, #64*!pi**4/(3*h*c**3)*nu22**3*mu0**2*(2/3.)
'threethree': 10**-6.89179, #64*!pi**4/(3*h*c**3)*nu33**3*mu0**2*(3/4.)
}
hf_aval_dict={
'oneone_f10':10**-8.92509,
'oneone_f01':10**-8.44797,
'oneone_f22':10**-8.57294,
'oneone_f21':10**-9.05004,
'oneone_f12':10**-8.82819,
'oneone_f11':10**-9.05009,
'twotwo_f11':10**-7.61876,
'twotwo_f12':10**-8.09586,
'twotwo_f21':10**-8.31771,
'twotwo_f32':10**-8.44804,
'twotwo_f33':10**-7.54494,
'twotwo_f22':10**-7.65221,
'twotwo_f23':10**-8.30191,
'threethree_f22':10**-6.94294,
'threethree_f44':10**-6.91981,
'threethree_f33':10**-6.96736,
}
ortho_dict = {
'oneone': False,
'twotwo': False,
'threethree': False,
}
n_ortho = np.arange(0,28,3) # 0..3..27
n_para = np.array([x for x in range(28) if x % 3 != 0])
voff_lines_dict = {
'oneone': [(hf_freq_dict[f]-freq_dict['oneone'])/freq_dict['oneone']*units.speedoflight_ms for f in hf_freq_dict.keys() if "oneone" in f],
'twotwo': [(hf_freq_dict[f]-freq_dict['twotwo'])/freq_dict['twotwo']*units.speedoflight_ms for f in hf_freq_dict.keys() if "twotwo" in f],
'threethree': [(hf_freq_dict[f]-freq_dict['threethree'])/freq_dict['threethree']*units.speedoflight_ms for f in hf_freq_dict.keys() if "threethree" in f],
}
voff_lines_dict={ # opposite signs of freq offset
'oneone_f10': + 18.53e3/freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
'oneone_f01': + 1.34e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
'oneone_f22': + 0.35e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
'oneone_f21': - 4.05e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
'oneone_f12': - 6.48e3 /freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
'oneone_f11': - 11.08e3/freq_dict['oneone'] * units.speedoflight_ms / 1000.0,
'twotwo_f11': + 19.97e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'twotwo_f12': + 7.03e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'twotwo_f21': + 2.20e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'twotwo_f32': - 0.12e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'twotwo_f33': - 0.89e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'twotwo_f22': - 10.74e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'twotwo_f23': - 11.51e3/freq_dict['twotwo'] * units.speedoflight_ms / 1000.0,
'threethree_f22':28.97478e9,
'threethree_f44':28.97480e9,
'threethree_f33':28.97481e9,
}
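# The km/s entries above follow voff = -(nu_hf - nu_center) / nu_center *
# speedoflight_ms / 1000, i.e. the sign-flipped fractional frequency offset
# converted to km/s; the threethree entries appear to retain raw frequencies.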
formaldehyde_vtau = hyperfine.hyperfinemodel(line_names, voff_lines_dict,
freq_dict, line_strength_dict,
relative_strength_total_degeneracy)
formaldehyde_vtau_fitter = formaldehyde_vtau.fitter
formaldehyde_vtau_vheight_fitter = formaldehyde_vtau.vheight_fitter
formaldehyde_vtau_tbg_fitter = formaldehyde_vtau.background_fitter
def formaldehyde_radex(xarr, density=4, column=13, xoff_v=0.0, width=1.0,
grid_vwidth=1.0, grid_vwidth_scale=False, texgrid=None,
taugrid=None, hdr=None, path_to_texgrid='',
path_to_taugrid='', temperature_gridnumber=3,
debug=False, verbose=False, **kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
grid_vwidth is the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
grid_vwidth_scale is True or False: False for LVG, True for Sphere
"""
if texgrid is None and taugrid is None:
if path_to_texgrid == '' or path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
taugrid = [pyfits.getdata(path_to_taugrid)]
texgrid = [pyfits.getdata(path_to_texgrid)]
hdr = pyfits.getheader(path_to_taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
minfreq = (4.8,)
maxfreq = (5.0,)
elif len(taugrid)==len(texgrid) and hdr is not None:
minfreq,maxfreq,texgrid = zip(*texgrid)
minfreq,maxfreq,taugrid = zip(*taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
else:
raise Exception
# Convert X-units to frequency in GHz
xarr = xarr.as_unit('Hz', quiet=True)
tau_nu_cumul = np.zeros(len(xarr))
gridval1 = np.interp(density, densityarr[0,:], xinds[0,:])
gridval2 = np.interp(column, columnarr[:,0], yinds[:,0])
if np.isnan(gridval1) or np.isnan(gridval2):
raise ValueError("Invalid column/density")
if scipyOK:
slices = [temperature_gridnumber] + [slice(np.floor(gv),np.floor(gv)+2) for gv in (gridval2,gridval1)]
tau = [scipy.ndimage.map_coordinates(tg[slices],np.array([[gridval2%1],[gridval1%1]]),order=1) for tg in taugrid]
tex = [scipy.ndimage.map_coordinates(tg[slices],np.array([[gridval2%1],[gridval1%1]]),order=1) for tg in texgrid]
else:
raise ImportError("Couldn't import scipy, therefore cannot interpolate")
#tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
#tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])
if verbose:
print("density %20.12g column %20.12g: tau %20.12g tex %20.12g" % (density, column, tau, tex))
if debug:
import pdb; pdb.set_trace()
spec = np.sum([(formaldehyde_vtau(xarr,Tex=float(tex[ii]),tau=float(tau[ii]),xoff_v=xoff_v,width=width, **kwargs)
* (xarr.as_unit('GHz')>minfreq[ii]) * (xarr.as_unit('GHz')<maxfreq[ii])) for ii in xrange(len(tex))],
axis=0)
return spec
def formaldehyde_radex_orthopara_temp(xarr, density=4, column=13,
orthopara=1.0, temperature=15.0,
xoff_v=0.0, width=1.0,
Tbackground1=2.73,
Tbackground2=2.73,
grid_vwidth=1.0,
grid_vwidth_scale=False, texgrid=None,
taugrid=None, hdr=None,
path_to_texgrid='', path_to_taugrid='',
debug=False, verbose=False,
getpars=False, **kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
grid_vwidth is the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
grid_vwidth_scale is True or False: False for LVG, True for Sphere
"""
if texgrid is None and taugrid is None:
if path_to_texgrid == '' or path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
taugrid = [pyfits.getdata(path_to_taugrid)]
texgrid = [pyfits.getdata(path_to_texgrid)]
hdr = pyfits.getheader(path_to_taugrid)
minfreq = (4.8,)
maxfreq = (5.0,)
elif len(taugrid)==len(texgrid) and hdr is not None:
minfreq,maxfreq,texgrid = zip(*texgrid)
minfreq,maxfreq,taugrid = zip(*taugrid)
else:
raise Exception
densityarr = (np.arange(taugrid[0].shape[3])+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (np.arange(taugrid[0].shape[2])+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
temparr = (np.arange(taugrid[0].shape[1])+hdr['CRPIX3']-1)*hdr['CDELT3']+hdr['CRVAL3'] # temperature
oprarr = (np.arange(taugrid[0].shape[0])+hdr['CRPIX4']-1)*hdr['CDELT4']+hdr['CRVAL4'] # log ortho/para ratio
gridval1 = np.interp(density, densityarr, np.arange(len(densityarr)))
gridval2 = np.interp(column, columnarr, np.arange(len(columnarr)))
gridval3 = np.interp(temperature, temparr, np.arange(len(temparr)))
gridval4 = np.interp(orthopara, oprarr, np.arange(len(oprarr)))
if np.isnan(gridval1) or np.isnan(gridval2):
raise ValueError("Invalid column/density")
if scipyOK:
slices = [slice(int(np.floor(gv)),int(np.floor(gv)+2))
for gv in (gridval4,gridval3,gridval2,gridval1)]
tau = [scipy.ndimage.map_coordinates(tg[slices],
np.array([[gridval4 % 1],
[gridval3 % 1],
[gridval2 % 1],
[gridval1 % 1]]),
order=1, prefilter=False)
for tg in taugrid]
tex = [scipy.ndimage.map_coordinates(tg[slices],
np.array([[gridval4 % 1],
[gridval3 % 1],
[gridval2 % 1],
[gridval1 % 1]]),
order=1,prefilter=False)
for tg in texgrid]
else:
raise ImportError("Couldn't import scipy, therefore cannot interpolate")
#tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
#tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])
# there can be different background temperatures at each frequency
tbg = [Tbackground1,Tbackground2]
if verbose:
print("density %20.12g column: %20.12g temperature: %20.12g opr: %20.12g xoff_v: %20.12g width: %20.12g" % (density, column, temperature, orthopara, xoff_v, width))
print("tau: ",tau," tex: ",tex)
print("minfreq: ",minfreq," maxfreq: ",maxfreq)
print("tbg: ",tbg)
if debug > 1:
import pdb; pdb.set_trace()
if getpars:
return tau,tex
spec = np.sum([(formaldehyde_vtau(xarr.as_unit('Hz', quiet=True),
Tex=float(tex[ii]), tau=float(tau[ii]),
Tbackground=tbg[ii], xoff_v=xoff_v,
width=width, **kwargs)
* (xarr.as_unit('GHz')>minfreq[ii])
* (xarr.as_unit('GHz')<maxfreq[ii]))
for ii in xrange(len(tex))],
axis=0)
return spec
def formaldehyde(xarr, amp=1.0, xoff_v=0.0, width=1.0,
return_hyperfine_components=False, texscale=0.01, tau=0.01, **kwargs):
"""
Generate a model Formaldehyde spectrum based on simple gaussian parameters
the "amplitude" is an essentially arbitrary parameter; we therefore define
it to be Tex given tau=0.01 when passing to the fitter
The final spectrum is then rescaled to that value
"""
mdl = formaldehyde_vtau(xarr, Tex=amp*texscale, tau=tau, xoff_v=xoff_v,
width=width,
return_tau=True,
return_hyperfine_components=return_hyperfine_components, **kwargs)
if return_hyperfine_components:
mdlpeak = np.abs(mdl).squeeze().sum(axis=0).max()
else:
mdlpeak = np.abs(mdl).max()
if mdlpeak > 0:
mdl *= amp/mdlpeak
return mdl
def formaldehyde_pyradex(xarr, density=4, column=13, temperature=20,
xoff_v=0.0, opr=1.0, width=1.0, tbackground=2.73,
grid_vwidth=1.0, debug=False, verbose=False,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
grid_vwidth is the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
"""
raise NotImplementedError("Not done yet.")
import pyradex
# Convert X-units to frequency in GHz
xarr = xarr.as_unit('Hz', quiet=True)
tb_nu_cumul = np.zeros(len(xarr))
R = pyradex.Radex(molecule='oh2co-h2', column=column,
temperature=temperature, density=10**density,
tbackground=tbackground,)
spec = np.sum([(formaldehyde_vtau(xarr,Tex=float(tex[ii]),tau=float(tau[ii]),xoff_v=xoff_v,width=width, **kwargs)
* (xarr.as_unit('GHz')>minfreq[ii]) * (xarr.as_unit('GHz')<maxfreq[ii])) for ii in xrange(len(tex))],
axis=0)
return spec
class formaldehyde_model(model.SpectralModel):
def formaldehyde_integral(self, modelpars, linename='oneone'):
"""
Return the integral of the individual components (ignoring height)
"""
raise NotImplementedError("Not implemented, but the integral is just amplitude * width * sqrt(2*pi)")
# produced by directly computing the integral of gaussians and formaldehydeians as a function of
# line width and then fitting that with a broken logarithmic power law
# The errors are <0.5% for all widths
formaldehyde_to_gaussian_ratio_coefs = {
'lt0.1_oneone': np.array([ -5.784020,-40.058798,-111.172706,-154.256411,-106.593122,-28.933119]),
'gt0.1_oneone': np.array([ 0.038548, -0.071162, -0.045710, 0.183828, -0.145429, 0.040039]),
'lt0.1_twotwo': np.array([ 1.156561, 6.638570, 11.782065, -0.429536,-24.860297,-27.902274, -9.510288]),
'gt0.1_twotwo': np.array([ -0.090646, 0.078204, 0.123181, -0.175590, 0.089506, -0.034687, 0.008676]),
}
integ = 0
if len(modelpars) % 3 == 0:
for amp,cen,width in np.reshape(modelpars,[len(modelpars)/3,3]):
gaussint = amp*width*np.sqrt(2.0*np.pi)
cftype = "gt0.1_"+linename if width > 0.1 else "lt0.1_"+linename
correction_factor = 10**np.polyval(formaldehyde_to_gaussian_ratio_coefs[cftype], np.log10(width) )
# debug statement print("Two components of the integral: amp %g, width %g, gaussint %g, correction_factor %g " % (amp,width,gaussint,correction_factor))
integ += gaussint*correction_factor
return integ
formaldehyde_fitter = formaldehyde_model(formaldehyde, 3,
parnames=['amp','center','width'],
parlimited=[(False,False),(False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0)],
shortvarnames=("A","v","\\sigma"), # specify the parameter names (TeX is OK)
fitunit='Hz' )
formaldehyde_vheight_fitter = formaldehyde_model(fitter.vheightmodel(formaldehyde), 4,
parnames=['height','amp','center','width'],
parlimited=[(False,False),(False,False),(False,False), (True,False)],
parlimits=[(0,0), (0,0), (0,0), (0,0)],
shortvarnames=("H","A","v","\\sigma"), # specify the parameter names (TeX is OK)
fitunit='Hz' )
# Create a tau-only fit:
def formaldehyde_radex_tau(xarr, density=4, column=13, xoff_v=0.0, width=1.0,
grid_vwidth=1.0, grid_vwidth_scale=False,
taugrid=None, hdr=None, path_to_taugrid='',
temperature_gridnumber=3, debug=False,
verbose=False, return_hyperfine_components=False,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
* uses hyperfine components
* assumes *tau* varies but *tex* does not!
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
xoff_v, width are both in km/s
grid_vwidth is the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
grid_vwidth_scale is True or False: False for LVG, True for Sphere
"""
if verbose:
print("Parameters: dens=%f, column=%f, xoff=%f, width=%f" % (density, column, xoff_v, width))
if taugrid is None:
if path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
taugrid = [pyfits.getdata(path_to_taugrid)]
hdr = pyfits.getheader(path_to_taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
minfreq = (4.8,)
maxfreq = (5.0,)
elif hdr is not None:
minfreq,maxfreq,taugrid = zip(*taugrid)
yinds,xinds = np.indices(taugrid[0].shape[1:])
densityarr = (xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
columnarr = (yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
else:
raise Exception
# Convert X-units to frequency in GHz
xarr = xarr.as_unit('Hz', quiet=True)
gridval1 = np.interp(density, densityarr[0,:], xinds[0,:])
gridval2 = np.interp(column, columnarr[:,0], yinds[:,0])
if np.isnan(gridval1) or np.isnan(gridval2):
raise ValueError("Invalid column/density")
if scipyOK:
slices = [temperature_gridnumber] + [slice(np.floor(gv),np.floor(gv)+2) for gv in (gridval2,gridval1)]
tau = [scipy.ndimage.map_coordinates(tg[slices],np.array([[gridval2%1],[gridval1%1]]),order=1) for tg in taugrid]
else:
raise ImportError("Couldn't import scipy, therefore cannot interpolate")
# let the hyperfine module determine the hyperfine components, and pass all of them here
spec_components = [(formaldehyde_vtau(xarr.as_unit('Hz', quiet=True),
tau=float(tau[ii]), xoff_v=xoff_v, width=width,
return_tau=True, return_hyperfine_components=True, **kwargs) *
(xarr.as_unit('GHz')>minfreq[ii]) *
(xarr.as_unit('GHz')<maxfreq[ii]))
for ii in xrange(len(tau))]
# get an array of [n_lines, n_hyperfine, len(xarr)]
if return_hyperfine_components:
return np.array(spec_components).sum(axis=0)
else:
return np.sum(spec_components, axis=0).sum(axis=0)
try:
import pymodelfit
class pmfFormaldehydeModel(pymodelfit.FunctionModel1DAuto):
def f(self, x, amp0=1.0, xoff_v0=0.0,width0=1.0):
return formaldehyde(x,
amp=amp0,
xoff_v=xoff_v0,width=width0)
class pmfFormaldehydeModelVtau(pymodelfit.FunctionModel1DAuto):
def f(self, x, Tex0=1.0, tau0=0.01, xoff_v0=0.0, width0=1.0):
return formaldehyde_vtau(x,
Tex=Tex0, tau=tau0,
xoff_v=xoff_v0,width=width0)
except ImportError:
pass
| mit |
xjchensz/LSFS | LSFS/LSFS_FUN.py | 1 | 5171 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
import scipy as sp
import os
import random
import time
import sys
from LSFS_TEST import print_W
from EProjSimplex_new import *
def append_module_path():
import sys
paths = [ \
"../gen_data",
"../evaluate",
"../read_data"
]
for path in paths:
if path not in sys.path:
sys.path.append(path)
append_module_path()
import gen_data
import evaluate
import read_data
def norm_2_1(a):
"""
    Take the L2 norm of each row of a, then sum the norms (the L2,1 norm).
"""
return np.sum(np.linalg.norm(a, ord = 2, axis=1))
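# For example, norm_2_1(np.array([[3., 4.], [0., 5.]])) == 5.0 + 5.0 == 10.0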
def fun22_value(W, X, H, Q, Y):
"""
    ||H*X.T*W - H*Y||_F^2 + gama * trace(W.T*Q*W)
"""
    gama = 10**-6
return np.linalg.norm(np.dot(np.dot(H, X.T), W) - np.dot(H, Y), ord = "fro")**2 + gama*np.trace(np.dot(np.dot(W.T, Q),W))
def fun8_value(X, Y, W, b):
"""
X : d x n
    ||X.T * W + 1*b.T - Y||_2^2 + gama * ||W||_F^2
"""
    gama = 10**-6
n = X.shape[1]
return np.linalg.norm( np.dot(X.T,W) + np.dot(np.ones((n, 1)),b.T) - Y , ord=2)**2 + gama*(np.linalg.norm( W , ord = "fro")**2)
def compute_W(X, Y, H, Q):
# gama = 10^-6
gama = 60
"""
W = (X*H*X.T + gama * Q)^-1 * X*H*Y
"""
W = np.dot( np.dot( np.dot( \
np.linalg.inv( np.dot( np.dot(X,H), X.T)+gama*Q ) \
, X), H), Y)
return W
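# The closed form above is the stationary point of the objective in fun22_value:
# setting the gradient of ||H*(X.T*W - Y)||_F^2 + gama*trace(W.T*Q*W) to zero
# (and using H = H.T = H*H) gives (X*H*X.T + gama*Q)*W = X*H*Y.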
def compute_H(n):
"""
I => n x n
1 => n x 1
H = I - 1/n * 1 * 1.T
"""
H = np.eye(n,n) - 1/n*np.ones((n,n))
return H
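# H is the standard centering matrix: for a (d x n) data matrix X, X.dot(H)
# subtracts each feature's mean over the n samples.  Quick check:
# >>> np.allclose(compute_H(3).dot(np.ones(3)), 0)
# True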
def compute_Q(W):
"""
q(ij) = ||W||2,1 / ||w^j||2
    axis=1 => take the L2 norm of each row of W
    np.linalg.norm(W, ord=2, axis=1) => per-row L2 norms
"""
Q = norm_2_1(W) / np.linalg.norm(W, ord = 2, axis=1)
Q = np.diag(Q)
return Q
def get_W(X, Y):
"""
    d features, c classes, n samples
X : (d x n)
Y : (n x c)
    The algorithm works with X of shape (d x n)
"""
d, n = X.shape
c = Y.shape[1]
    # Initialize Q as the identity matrix
Q = np.eye(d)
# print(Q)
# print("====================")
    # H does not change, so it only needs to be computed once
H = compute_H(n)
W = compute_W(X, Y, H, Q)
Q = compute_Q(W)
pre_f = cur_f = fun22_value(W, X, H, Q, Y)
# print(W)
# print()
# print(Q)
# print("====================")
NITER = 900
epsilon = 10**-8
for i in range(NITER):
pre_f = cur_f
W = compute_W(X, Y, H, Q)
Q = compute_Q(W)
# print_W(W)
cur_f = fun22_value(W, X, H, Q, Y)
if abs((cur_f - pre_f) / cur_f) < epsilon:
break
return W
def compute_YU(X, W, b):
"""
X : (d x n)
"""
c = W.shape[1]
YU = np.zeros((X.shape[1], c))
    # For each sample (a 1 x d row vector)
for i in range(X.shape[1]):
"""
        min ||(xi.T) * W + b.T - yi.T||_F^2
s.t. yi>=0, 1*yi=1
"""
ad = np.dot(X[:,i:i+1].T, W) + b.T
ad_new, ft = EProjSimplex_new(ad)
YU[i:i+1,:] = ad_new.A
return YU
def compute_b(X, Y, W):
"""
X : d x n
Y : n x c
W : d x c
b = 1/n * (Y.T * 1 - W.T * X * 1)
    where 1 is the n x 1 all-ones vector
"""
n = X.shape[1]
b = 1/n*(np.dot(Y.T, np.ones((n,1))) - np.dot(np.dot(W.T, X), np.ones((n,1))))
return b
def get_new_X_Y_YU_W_f(X, Y, XL, YL, XU):
"""
X : d x n
Y : n x c
XL : nl x d
YL : nl x c
XU : nu x d
"""
# n = X.shape[1]
W = get_W(X, Y)
# print_W(W)
# b = 1/n*(np.dot(Y.T, np.ones((n,1))) - np.dot(np.dot(W.T, X), np.ones((n,1))))
b = compute_b(X, Y, W)
YU = compute_YU(XU.T, W, b)
X = sp.concatenate((XL, XU), axis = 0)
Y = sp.concatenate((YL, YU), axis = 0)
X = X.T
cur_f = fun8_value(X, Y, W, b)
return X, Y, YU, W, cur_f
def compute_thea(W):
"""
W : d x c
thea_j = ||w_j||2 / sum(||w_j||2)
j=1:d
"""
    # Sum of the per-row L2 norms of W
W_L2_sum = np.sum(np.linalg.norm(W, ord=2, axis = 1))
    # Per-row L2 norms of W, divided by their sum
s = np.linalg.norm(W, ord=2, axis = 1) / W_L2_sum
return s
def lsfs(XL, YL, XU, output_file_name="feature_order"):
start_time = time.clock()
X, Y, YU, W, cur_f = get_new_X_Y_YU_W_f(XL.T, YL, XL, YL, XU)
print_W(W)
NITER = 100
epsilon = 10**-8
for i in range(NITER):
pre_f = cur_f
X, Y, YU, W, cur_f = get_new_X_Y_YU_W_f(X, Y, XL, YL, XU)
print_W(W)
        # convergence check
if abs((cur_f - pre_f) / cur_f) < epsilon:
break
s = compute_thea(W)
feature_order = list( np.argsort(s) )
feature_order = feature_order[::-1]
time_dual = time.clock() - start_time
with open(output_file_name, "w+") as result_file:
print("\n".join([str(w) for w in feature_order]), file=result_file)
return feature_order, time_dual
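# Illustrative call (a sketch, not part of the original module; shapes follow
# the conventions documented above -- XL: nl x d labeled data, YL: nl x c
# one-hot labels, XU: nu x d unlabeled data):
# >>> XL = np.random.rand(20, 5); XU = np.random.rand(30, 5)
# >>> YL = np.eye(2)[np.random.randint(0, 2, size=20)]
# >>> feature_order, elapsed = lsfs(XL, YL, XU, output_file_name="feature_order")
# feature_order then lists the 5 feature indices from most to least important.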
| gpl-3.0 |
QuantSoftware/QuantSoftwareToolkit | Examples/Basic/tutorial4.py | 5 | 2590 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on January, 24, 2013
@author: Sourabh Bajaj
@contact: [email protected]
@summary: Example tutorial code.
'''
# QSTK Imports
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
# Third Party Imports
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import cPickle
def main():
''' Main Function'''
# Start and End date of the charts
dt_start = dt.datetime(2004, 1, 1)
dt_end = dt.datetime(2009, 12, 31)
# We need closing prices so the timestamp should be hours=16.
dt_timeofday = dt.timedelta(hours=16)
# Get a list of trading days between the start and the end.
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
# Creating an object of the dataaccess class with Yahoo as the source.
c_dataobj = da.DataAccess('Yahoo')
# List of symbols - First 20
ls_symbols = c_dataobj.get_symbols_from_list('sp5002012')
ls_symbols = ls_symbols[:20]
ls_symbols.append('_CASH')
# Creating the first allocation row
na_vals = np.random.randint(0, 1000, len(ls_symbols))
# Normalize the row - Typecasting as everything is int.
na_vals = na_vals / float(sum(na_vals))
# Reshape to a 2D matrix to append into dataframe.
na_vals = na_vals.reshape(1, -1)
# Creating Allocation DataFrames
df_alloc = pd.DataFrame(na_vals, index=[ldt_timestamps[0]],
columns=ls_symbols)
dt_last_date = ldt_timestamps[0]
# Looping through all dates and creating monthly allocations
for dt_date in ldt_timestamps[1:]:
if dt_last_date.month != dt_date.month:
# Create allocation
na_vals = np.random.randint(0, 1000, len(ls_symbols))
na_vals = na_vals / float(sum(na_vals))
na_vals = na_vals.reshape(1, -1)
# Append to the dataframe
df_new_row = pd.DataFrame(na_vals, index=[dt_date],
columns=ls_symbols)
df_alloc = df_alloc.append(df_new_row)
dt_last_date = dt_date
# Create the outpul pickle file for the dataframe.
output = open('allocation.pkl', 'wb')
cPickle.dump(df_alloc, output)
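    # The saved allocations can be restored later with, for example:
    # >>> df_alloc = cPickle.load(open('allocation.pkl', 'rb'))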
if __name__ == '__main__':
main()
| bsd-3-clause |
upliftaero/MissionPlanner | Lib/site-packages/numpy/lib/function_base.py | 53 | 108301 | __docformat__ = "restructuredtext en"
__all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp']
import warnings
import types
import sys
import numpy.core.numeric as _nx
from numpy.core import linspace
from numpy.core.numeric import ones, zeros, arange, concatenate, array, \
asarray, asanyarray, empty, empty_like, ndarray, around
from numpy.core.numeric import ScalarType, dot, where, newaxis, intp, \
integer, isscalar
from numpy.core.umath import pi, multiply, add, arctan2, \
frompyfunc, isnan, cos, less_equal, sqrt, sin, mod, exp, log10
from numpy.core.fromnumeric import ravel, nonzero, choose, sort, mean
from numpy.core.numerictypes import typecodes, number
from numpy.core import atleast_1d, atleast_2d
from numpy.lib.twodim_base import diag
if sys.platform != 'cli':
from _compiled_base import _insert, add_docstring
from _compiled_base import digitize, bincount, interp as compiled_interp
else:
from _compiled_base import _insert, bincount
# TODO: Implement these
def add_docstring(*args, **kw):
pass
def digitize(*args, **kw):
raise NotImplementedError()
def compiled_interp(*args, **kw):
raise NotImplementedError()
from arraysetops import setdiff1d
from utils import deprecate
import numpy as np
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try: iter(y)
except: return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
        so that the integral of the density over the range remains 1.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), normed=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, normed=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if range is None:
range = (a.min(), a.max())
mn, mx = [mi+0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins+1, endpoint=True)
uniform = True
else:
bins = asarray(bins)
uniform = False
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
block = 65536
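    # Process the data in fixed-size blocks so the temporary sorted copies stay
    # small; per-bin counts are accumulated with searchsorted against the edges.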
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero,], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if normed:
db = array(np.diff(bins), float)
if not uniform:
warnings.warn("""
This release of NumPy fixes a normalization bug in histogram
            function occurring with non-uniform bin widths. The returned
value is now a density: n / (N * bin width), where n is the
bin count and N the total number of points.
""")
return n/db/n.sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitely in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : boolean, optional
If False, returns the number of samples in each bin. If True, returns
        the bin density, i.e., the bin count divided by the bin hypervolume.
weights : array_like (N,), optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights for
the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1D histogram
histogram2d: 2D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal'\
' to the dimension of the sample x.')
except TypeError:
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1)
else:
edges[i] = asarray(bins[i], float)
nbin[i] = len(edges[i])+1 # +1 for outlier bins
dedges[i] = diff(edges[i])
nbin = asarray(nbin)
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:,i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
outliers = zeros(N, int)
for i in arange(D):
# Rounding precision
decimal = int(-log10(dedges[i].min())) +6
# Find which points are on the rightmost edge.
on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1],
decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
shape = []
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i,j)
ni[i],ni[j] = ni[j],ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1,-1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : {array_type, double}
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix) :
a = np.asarray(a)
if weights is None :
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else :
a = a + 0.0
wgt = np.array(weights, dtype=a.dtype, copy=0)
# Sanity checks
if a.shape != wgt.shape :
if axis is None :
raise TypeError(
"Axis must be specified when shapes of a "\
"and weights differ.")
if wgt.ndim != 1 :
raise TypeError(
"1D weights expected when shapes of a and "\
"weights differ.")
if wgt.shape[0] != a.shape[axis] :
raise ValueError(
"Length of weights not compatible with "\
"specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
def asarray_chkfinite(a):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
    asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a)
array([1, 2])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a)
if (a.dtype.char in typecodes['AllFloat']) \
and (_nx.isnan(a).any() or _nx.isinf(a).any()):
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have undefined values.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.arange(6) - 2.5
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray)):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
n = len(condlist)
n2 = len(choicelist)
if n2 != n:
raise ValueError(
"list of cases must be same length as list of conditions")
choicelist = [default] + choicelist
S = 0
pfac = 1
for k in range(1, n+1):
S += k * pfac * asarray(condlist[k-1])
if k < n:
pfac *= (1-asarray(condlist[k-1]))
# handle special case of a 1-element condition but
# a multi-element choice
if type(S) in ScalarType or max(asarray(S).shape)==1:
pfac = asarray(1)
for k in range(n2+1):
pfac = pfac + asarray(choicelist[k])
if type(S) in ScalarType:
S = S*ones(asarray(pfac).shape, type(S))
else:
S = S*ones(asarray(pfac).shape, S.dtype)
return choose(S, tuple(choicelist))
def copy(a):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, copy=True)
# Basic operations
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using central differences in the interior
and first differences at the boundaries. The returned gradient hence has
the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
`*varargs` : scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: `dx`, `dy`, `dz`, ... The default distance is 1.
Returns
-------
g : ndarray
N arrays of the same shape as `f` giving the derivative of `f` with
respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]),
array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
"""
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
# use central differences on interior and first differences on endpoints
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D']:
otype = 'd'
for axis in range(N):
# select out appropriate parts for this dimension
out = np.zeros_like(f).astype(otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
out[slice1] = (f[slice2] - f[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (f[1] - f[0])
out[slice1] = (f[slice2] - f[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (f[-1] - f[-2])
out[slice1] = (f[slice2] - f[slice3])
# divide by step size
outvals.append(out / dx[axis])
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
out : ndarray
The `n` order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
        Value to return for `x > xp[-1]`, default is `fp[-1]`.
Returns
-------
y : {float, ndarray}
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasingness is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : {ndarray, scalar}
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd+pi, 2*pi)-pi
_nx.putmask(ddmod, (ddmod==-pi) & (dd > 0), pi)
ph_correct = ddmod - dd;
_nx.putmask(ph_correct, abs(dd)<discont, 0)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
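# Illustrative sketch (hypothetical helper, not part of the original module):
# unwrap recovers a continuous phase ramp from the wrapped output of angle by
# replacing jumps larger than pi with their 2*pi complement.
def _demo_unwrap_phase():
    import numpy as np
    t = np.linspace(0, 4 * np.pi, 200)
    wrapped = np.angle(np.exp(1j * t))   # phase folded into (-pi, pi]
    continuous = np.unwrap(wrapped)      # jumps > pi replaced by 2*pi complement
    return np.allclose(continuous, t)    # True: the original ramp is recovered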
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a,copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.: break
else: first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.: break
else: last = last - 1
return filt[first:last]
import sys
if sys.hexversion < 0x2040000:
from sets import Set as set
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True],tmp[1:]!=tmp[:-1]))
return tmp[idx]
except AttributeError:
items = list(set(x))
items.sort()
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
See Also
--------
take, put, putmask, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
    Similar to ``np.putmask(arr, mask, vals)``; the difference is that `place`
uses the first N elements of `vals`, where N is the number of True values
in `mask`, while `putmask` uses the elements where `mask` is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
    vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
        N is the number of True values in `mask`. If `vals` is smaller
        than N, it will be repeated.
See Also
--------
putmask, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def _nanop(op, fill, a, axis=None):
"""
General operation on arrays with not-a-number values.
Parameters
----------
op : callable
Operation to perform.
fill : float
NaN values are set to fill before doing the operation.
a : array-like
Input array.
axis : {int, None}, optional
Axis along which the operation is computed.
By default the input is flattened.
Returns
-------
y : {ndarray, scalar}
Processed data.
"""
y = array(a, subok=True)
# We only need to take care of NaN's in floating point arrays
if np.issubdtype(y.dtype, np.integer):
return op(y, axis=axis)
mask = isnan(a)
# y[mask] = fill
# We can't use fancy indexing here as it'll mess w/ MaskedArrays
# Instead, let's fill the array directly...
np.putmask(y, mask, fill)
res = op(y, axis=axis)
mask_all_along_axis = mask.all(axis=axis)
# Along some axes, only nan's were encountered. As such, any values
# calculated along that axis should be set to nan.
if mask_all_along_axis.any():
if np.isscalar(res):
res = np.nan
else:
res[mask_all_along_axis] = np.nan
return res
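# Illustrative sketch (hypothetical helper, not part of the original module):
# the fill-then-reduce strategy used by _nanop, shown for the additive
# identity that nansum relies on.
def _demo_nanop_fill():
    import numpy as np
    a = np.array([1.0, np.nan, 3.0])
    filled = np.where(np.isnan(a), 0.0, a)   # NaNs replaced by the fill value 0
    return np.nansum(a) == filled.sum()      # True: both give 4.0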
def nansum(a, axis=None):
"""
Return the sum of array elements over a given axis treating
Not a Numbers (NaNs) as zero.
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the sum is computed. The default is to compute
the sum of the flattened array.
Returns
-------
y : ndarray
An array with the same shape as a, with the specified axis removed.
If a is a 0-d array, or if axis is None, a scalar is returned with
the same dtype as `a`.
See Also
--------
numpy.sum : Sum across array including Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
    isfinite : Shows which elements are finite (not NaN, not infinity).
Notes
-----
    Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
    (IEEE 754). This means that Not a Number is not equivalent to infinity.
    If positive or negative infinity is present, the result is positive or
    negative infinity. But if both positive and negative infinity are present,
    the result is Not A Number (NaN).
    Arithmetic is modular when using integer types (in which case all elements
    of `a` are necessarily finite, since NaN and infinity exist only for
    floating-point types), and no error is raised on overflow.
Examples
--------
>>> np.nansum(1)
1
>>> np.nansum([1])
1
>>> np.nansum([1, np.nan])
1.0
>>> a = np.array([[1, 1], [1, np.nan]])
>>> np.nansum(a)
3.0
>>> np.nansum(a, axis=0)
array([ 2., 1.])
When positive infinity and negative infinity are present
>>> np.nansum([1, np.nan, np.inf])
inf
>>> np.nansum([1, np.nan, np.NINF])
-inf
>>> np.nansum([1, np.nan, np.inf, np.NINF])
nan
"""
return _nanop(np.sum, 0, a, axis)
def nanmin(a, axis=None):
"""
Return the minimum of an array or minimum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired.
axis : int, optional
        Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
Returns
-------
nanmin : ndarray
A new array or a scalar array with the result.
See Also
--------
numpy.amin : Minimum across array including any Not a Numbers.
numpy.nanmax : Maximum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
    isfinite : Shows which elements are finite (not NaN, not infinity).
Notes
-----
    Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
    If the input has an integer type, an integer type is returned unless
the input contains NaNs and infinity.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
return _nanop(np.min, np.inf, a, axis)
def nanargmin(a, axis=None):
"""
Return indices of the minimum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmin, nanargmax
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmin(a)
0
>>> np.nanargmin(a)
2
>>> np.nanargmin(a, axis=0)
array([1, 1])
>>> np.nanargmin(a, axis=1)
array([1, 0])
"""
return _nanop(np.argmin, np.inf, a, axis)
def nanmax(a, axis=None):
"""
Return the maximum of an array or maximum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not
an array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
        If `a` is a 0-d array, or if axis is None, an ndarray scalar is
        returned, with the same dtype as `a`.
See Also
--------
numpy.amax : Maximum across array including any Not a Numbers.
numpy.nanmin : Minimum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
    isfinite : Shows which elements are finite (not NaN, not infinity).
Notes
-----
    Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
    If the input has an integer type, an integer type is returned unless
the input contains NaNs and infinity.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
return _nanop(np.max, -np.inf, a, axis)
def nanargmax(a, axis=None):
"""
Return indices of the maximum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmax, nanargmin
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmax(a)
0
>>> np.nanargmax(a)
1
>>> np.nanargmax(a, axis=0)
array([1, 0])
>>> np.nanargmax(a, axis=1)
array([1, 1])
"""
return _nanop(np.argmax, -np.inf, a, axis)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
import sys
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
# return number of input arguments and
# number of default arguments
def _get_nargs(obj):
import re
terr = re.compile(r'.*? takes (exactly|at least) (?P<exargs>(\d+)|(\w+))' +
r' argument(s|) \((?P<gargs>(\d+)|(\w+)) given\)')
def _convert_to_int(strval):
try:
result = int(strval)
except ValueError:
if strval=='zero':
result = 0
elif strval=='one':
result = 1
elif strval=='two':
result = 2
# How high to go? English only?
else:
raise
return result
if not callable(obj):
raise TypeError(
"Object is not callable.")
if sys.version_info[0] >= 3:
# inspect currently fails for binary extensions
# like math.cos. So fall back to other methods if
# it fails.
import inspect
try:
spec = inspect.getargspec(obj)
nargs = len(spec.args)
if spec.defaults:
ndefaults = len(spec.defaults)
else:
ndefaults = 0
if inspect.ismethod(obj):
nargs -= 1
return nargs, ndefaults
except:
pass
if hasattr(obj,'func_code'):
fcode = obj.func_code
nargs = fcode.co_argcount
if obj.func_defaults is not None:
ndefaults = len(obj.func_defaults)
else:
ndefaults = 0
if isinstance(obj, types.MethodType):
nargs -= 1
return nargs, ndefaults
try:
obj()
return 0, 0
except TypeError, msg:
m = terr.match(str(msg))
if m:
nargs = _convert_to_int(m.group('exargs'))
ndefaults = _convert_to_int(m.group('gargs'))
if isinstance(obj, types.MethodType):
nargs -= 1
return nargs, ndefaults
raise ValueError(
"failed to determine the number of arguments for %s" % (obj))
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
    The data type of the output of `vectorize` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If None, the docstring will be the
`pyfunc` one.
Examples
--------
>>> def myfunc(a, b):
... \"\"\"Return a-b if a>b, otherwise return a+b\"\"\"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
"""
def __init__(self, pyfunc, otypes='', doc=None):
self.thefunc = pyfunc
self.ufunc = None
nin, ndefault = _get_nargs(pyfunc)
if nin == 0 and ndefault == 0:
self.nin = None
self.nin_wo_defaults = None
else:
self.nin = nin
self.nin_wo_defaults = nin - ndefault
self.nout = None
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"invalid otype specified")
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
self.lastcallargs = 0
def __call__(self, *args):
# get number of outputs and output types by calling
# the function on the first entries of args
nargs = len(args)
if self.nin:
if (nargs > self.nin) or (nargs < self.nin_wo_defaults):
raise ValueError(
"Invalid number of arguments")
# we need a new ufunc if this is being called with more arguments.
if (self.lastcallargs != nargs):
self.lastcallargs = nargs
self.ufunc = None
self.nout = None
if self.nout is None or self.otypes == '':
newargs = []
for arg in args:
newargs.append(asarray(arg).flat[0])
theout = self.thefunc(*newargs)
if isinstance(theout, tuple):
self.nout = len(theout)
else:
self.nout = 1
theout = (theout,)
if self.otypes == '':
otypes = []
for k in range(self.nout):
otypes.append(asarray(theout[k]).dtype.char)
self.otypes = ''.join(otypes)
# Create ufunc if not already created
if (self.ufunc is None):
self.ufunc = frompyfunc(self.thefunc, nargs, self.nout)
# Convert to object arrays first
newargs = [array(arg,copy=False,subok=True,dtype=object) for arg in args]
if self.nout == 1:
_res = array(self.ufunc(*newargs),copy=False,
subok=True,dtype=self.otypes[0])
else:
_res = tuple([array(x,copy=False,subok=True,dtype=c) \
for x, c in zip(self.ufunc(*newargs), self.otypes)])
return _res
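# Illustrative sketch (hypothetical helper, not part of the original module):
# fixing `otypes` up front so the result dtype does not depend on the type of
# the value returned for the first probed element.
def _demo_vectorize_otypes():
    import numpy as np
    def clipped_diff(a, b):
        return a - b if a > b else 0
    v = np.vectorize(clipped_diff, otypes=[np.float64])
    return v([1, 5, 3], 2)                  # array([ 0.,  3.,  1.])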
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be integer")
X = array(m, ndmin=2, dtype=float)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
axis = 0
tup = (slice(None),newaxis)
else:
axis = 1
tup = (newaxis, slice(None))
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=float)
X = concatenate((X,y), axis)
X -= X.mean(axis=1-axis)[tup]
if rowvar:
N = X.shape[1]
else:
N = X.shape[0]
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
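# Illustrative sketch (hypothetical helper, not part of the original module):
# with the default settings, cov is the centered outer product divided by
# (N - 1), which the manual computation below reproduces.
def _demo_cov_by_hand():
    import numpy as np
    X = np.vstack(([2.1, 2.5, 3.6, 4.0], [8.0, 10.0, 12.0, 14.0]))
    Xc = X - X.mean(axis=1)[:, None]          # center each variable (row)
    manual = Xc.dot(Xc.T) / (X.shape[1] - 1)  # unbiased normalization
    return np.allclose(np.cov(X), manual)     # True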
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
c = cov(x, y, rowvar, bias, ddof)
try:
d = diag(c)
except ValueError: # scalar covariance
return 1
return c/sqrt(multiply.outer(d,d))
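# Illustrative sketch (hypothetical helper, not part of the original module):
# corrcoef is cov divided elementwise by sqrt(C_ii * C_jj), as the formula in
# the docstring above states.
def _demo_corrcoef_normalization():
    import numpy as np
    x = [1.0, 2.0, 4.0, 7.0]
    y = [1.0, 3.0, 2.0, 9.0]
    C = np.cov(x, y)
    d = np.sqrt(np.diag(C))
    return np.allclose(np.corrcoef(x, y), C / np.outer(d, d))  # True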
def blackman(M):
"""
Return the Blackman window.
    The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, normalized to one (the value one appears only if the
number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
    .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/(M-1)) + 0.08 \\cos(4\\pi n/(M-1))
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> from numpy import blackman
>>> blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy import clip, log10, array, blackman, linspace
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = abs(fftshift(A))
>>> freq = linspace(-0.5,0.5,len(A))
>>> response = 20*log10(mag)
>>> response = clip(response,-100,100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, normalized to one (the value one
appears only if the number of samples is odd), with the first
and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
    discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is the
    product of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
    Plot the window and its frequency response (requires matplotlib):
>>> from numpy import clip, log10, array, bartlett, linspace
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = abs(fftshift(A))
>>> freq = linspace(-0.5,0.5,len(A))
>>> response = 20*log10(mag)
>>> response = clip(response,-100,100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, normalized to one (the value one
appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hanning window was named for Julius von Hann, an Austrian
    meteorologist. It is also known as the Cosine Bell. Some authors prefer
    that it be called a Hann window, to help avoid confusion with the very
    similar Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> from numpy import hanning
>>> hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = abs(fftshift(A))
>>> freq = np.linspace(-0.5,0.5,len(A))
>>> response = 20*np.log10(mag)
>>> response = np.clip(response,-100,100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
# XXX: this docstring is inconsistent with other filter windows, e.g.
# Blackman and Bartlett - they should all follow the same convention for
# clarity. Either use np. for all numpy members (as above), or import all
# numpy members (as in Blackman and Bartlett examples)
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.5-0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
    .. math:: w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hamming window was named for R. W. Hamming, an associate of J. W.
    Tukey, and is described in Blackman and Tukey. It was recommended for
    smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1,float)
n = arange(0,M)
return 0.54-0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in xrange(1,len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is partitioned
into the two intervals [0,8] and (8,inf), and Chebyshev polynomial
expansions are employed in each interval. Relative error on the domain
[0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16
with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions," in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x<0)
x[ind] = -x[ind]
ind = (x<=8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
def kaiser(M,beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
    The Kaiser window was named for Jim Kaiser, who discovered a simple
    approximation to the DPSS window based on Bessel functions.
    The Kaiser window is a very good approximation to the Digital Prolate
    Spheroidal Sequence, or Slepian window, which is the window that
    maximizes the energy in the main lobe relative to the total energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
    large enough to sample the increasingly narrow spike, otherwise NaNs will
    be returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> from numpy import kaiser
>>> kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy import clip, log10, array, kaiser, linspace
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = abs(fftshift(A))
>>> freq = linspace(-0.5,0.5,len(A))
>>> response = 20*log10(mag)
>>> response = clip(response,-100,100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0,M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
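# Illustrative sketch (hypothetical helper, not part of the original module):
# beta = 0 reduces the Kaiser taper to a rectangular window, matching the
# beta table in the docstring above.
def _demo_kaiser_beta_zero():
    import numpy as np
    return np.allclose(np.kaiser(8, 0.0), np.ones(8))  # True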
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a
Lanczos resampling filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.arange(-20., 21.)/5.
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.arange(-200., 201.)/50.
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi* where(x == 0, 1.0e-20, x)
return sin(y)/y
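# Illustrative sketch (hypothetical helper, not part of the original module):
# the normalized sinc used here is 1 at x = 0 and vanishes at every other
# integer.
def _demo_sinc_zeros():
    import numpy as np
    k = np.arange(1, 5, dtype=float)
    return np.allclose(np.sinc(0.0), 1.0) and np.allclose(np.sinc(k), 0.0)  # True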
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a,subok=True,copy=True)
b.sort(0)
return b
def median(a, axis=None, out=None, overwrite_input=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {None, int}, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : {False, True}, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True and the input
is not already an ndarray, an error will be raised.
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if axis is None:
axis = 0
indexer = [slice(None)] * sorted.ndim
index = int(sorted.shape[axis]/2)
if sorted.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
    # Use mean in both the odd and even case to coerce the data type,
    # and to check and use the out array.
return mean(sorted[indexer], axis=axis, out=out)
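# Illustrative sketch (hypothetical helper, not part of the original module):
# for an odd number of elements the median is the middle order statistic, for
# an even number it is the mean of the two middle ones.
def _demo_median_even_odd():
    import numpy as np
    odd = np.median([3.0, 1.0, 7.0])         # sorted copy is [1, 3, 7] -> 3.0
    even = np.median([3.0, 1.0, 7.0, 5.0])   # sorted copy is [1, 3, 5, 7] -> 4.0
    return odd, even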
def percentile(a, q, axis=None, out=None, overwrite_input=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
        Percentile to compute, which must be between 0 and 100 inclusive.
axis : {None, int}, optional
Axis along which the percentiles are computed. The default (axis=None)
        is to compute the percentile along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : {False, True}, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
        percentile. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True and the input
is not already an ndarray, an error will be raised.
Returns
-------
pcntile : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the qth percentile of V is the qth ranked
value in a sorted copy of V. A weighted average of the two nearest neighbors
is used if the normalized ranking does not match q exactly.
    The result is the same as the median if q is 50, the same as the minimum
    if q is 0, and the same as the maximum if q is 100.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=0)
    array([ 6.5,  4.5,  2.5])
    >>> np.percentile(a, 50, axis=1)
    array([ 7.,  2.])
    >>> m = np.percentile(a, 50, axis=0)
    >>> out = np.zeros_like(m)
    >>> np.percentile(a, 50, axis=0, out=m)
    array([ 6.5,  4.5,  2.5])
    >>> m
    array([ 6.5,  4.5,  2.5])
    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=1, overwrite_input=True)
    array([ 7.,  2.])
    >>> assert not np.all(a==b)
    >>> b = a.copy()
    >>> np.percentile(b, 50, axis=None, overwrite_input=True)
    3.5
    >>> assert not np.all(a==b)
"""
a = np.asarray(a)
if q == 0:
return a.min(axis=axis, out=out)
elif q == 100:
return a.max(axis=axis, out=out)
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, q, axis, out)
# handle sequence of q's without calling sort multiple times
def _compute_qth_percentile(sorted, q, axis, out):
if not isscalar(q):
p = [_compute_qth_percentile(sorted, qi, axis, None)
for qi in q]
if out is not None:
out.flat = p
return p
q = q / 100.0
if (q < 0) or (q > 1):
        raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
Nx = sorted.shape[axis]
index = q*(Nx-1)
i = int(index)
if i == index:
indexer[axis] = slice(i, i+1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i+2)
j = i + 1
weights = array([(j - index), (index - i)],float)
wshape = [1]*sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use add.reduce in both cases to coerce data type as well as
# check and use out array.
return add.reduce(sorted[indexer]*weights, axis=axis, out=out)/sumval
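# Illustrative sketch (hypothetical helper, not part of the original module):
# the weighted average used when q*(Nx-1) falls between two order statistics.
def _demo_percentile_interpolation():
    import numpy as np
    a = np.array([1.0, 2.0, 3.0, 4.0])
    # q = 25 gives fractional index 0.25*(4-1) = 0.75, so the result is
    # 0.25*a[0] + 0.75*a[1] = 1.75.
    return np.percentile(a, 25)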
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
out : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
    Image [2]_ illustrates the trapezoidal rule -- y-axis locations of points
    are taken from the `y` array; by default the x-axis distances between
    points are 1.0, but they can alternatively be provided with the `x` array
    or with the `dx` scalar. The return value equals the combined area under
    the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1,None)
slice2[axis] = slice(None,-1)
try:
        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError: # Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
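# Illustrative sketch (hypothetical helper, not part of the original module):
# the composite trapezoidal rule applied to x**2 on [0, 1]; the error shrinks
# as O(h**2).
def _demo_trapz_quadratic():
    import numpy as np
    x = np.linspace(0.0, 1.0, 101)
    approx = np.trapz(x ** 2, x)      # approximates the exact integral 1/3
    return abs(approx - 1.0 / 3.0)    # roughly 1.7e-5 for this spacing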
# always succeeds
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
    If doc is a string, add it to obj as a docstring.
    If doc is a tuple, the first element is interpreted as an attribute
    of obj and the second as the docstring: (method, docstring).
    If doc is a list, each element of the list should be a sequence of
    length two: [(method1, docstring1), (method2, docstring2), ...]
This routine never raises an error.
"""
try:
new = {}
exec 'from %s import %s' % (place, obj) in new
if isinstance(doc, str):
add_docstring(new[obj], doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new[obj], doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new[obj], val[0]), val[1].strip())
except:
pass
# From matplotlib
def meshgrid(x,y):
"""
Return coordinate matrices from two coordinate vectors.
Parameters
----------
x, y : ndarray
Two 1-D arrays representing the x and y coordinates of a grid.
Returns
-------
X, Y : ndarray
For vectors `x`, `y` with lengths ``Nx=len(x)`` and ``Ny=len(y)``,
return `X`, `Y` where `X` and `Y` are ``(Ny, Nx)`` shaped arrays
with the elements of `x` and y repeated to fill the matrix along
the first dimension for `x`, the second for `y`.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> X, Y = np.meshgrid([1,2,3], [4,5,6,7])
>>> X
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
>>> Y
array([[4, 4, 4],
[5, 5, 5],
[6, 6, 6],
[7, 7, 7]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)/(xx**2+yy**2)
"""
x = asarray(x)
y = asarray(y)
numRows, numCols = len(y), len(x) # yes, reversed
x = x.reshape(1,numCols)
X = x.repeat(numRows, axis=0)
y = y.reshape(numRows,1)
Y = y.repeat(numCols, axis=1)
return X, Y
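# Illustrative sketch (hypothetical helper, not part of the original module):
# both outputs have shape (Ny, Nx); X varies along columns and Y along rows.
def _demo_meshgrid_shapes():
    import numpy as np
    X, Y = np.meshgrid([1, 2, 3], [10, 20])
    return X.shape, Y.shape              # (2, 3), (2, 3)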
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
            ndim = arr.ndim
            axis = ndim-1
if ndim == 0:
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if (obj < 0 or obj >=N):
raise ValueError(
"invalid entry")
        newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1,None)
new[slobj] = arr[slobj2]
elif isinstance(obj, slice):
start, stop, step = obj.indices(N)
numtodel = len(xrange(start, stop, step))
if numtodel <= 0:
if wrap:
                return wrap(arr.copy())
else:
return arr.copy()
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
obj = arange(start, stop, step, dtype=intp)
all = arange(start, stop, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = obj
new[slobj] = arr[slobj2]
else: # default behavior
obj = array(obj, dtype=intp, copy=0, ndmin=1)
all = arange(N, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = obj
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
delete : Delete elements from an array.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim-1
if (ndim == 0):
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if obj < 0 or obj > N:
raise ValueError(
"index (%d) out of range (0<=index<=%d) "\
"in dimension %d" % (obj, N, axis))
        newshape[axis] += 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
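        # copy arr up to obj, write `values` at position obj, then copy the
        # rest of arr shifted right by one position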
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = obj
new[slobj] = values
slobj[axis] = slice(obj+1,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj,None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif isinstance(obj, slice):
# turn it into a range object
obj = arange(*obj.indices(N),**{'dtype':intp})
# get two sets of indices
# one is the indices which will hold the new stuff
# two is the indices where arr will be copied over
obj = asarray(obj, dtype=intp)
numnew = len(obj)
index1 = obj + arange(numnew)
index2 = setdiff1d(arange(numnew+N),index1)
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = index1
slobj2[axis] = index2
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If `axis`
is not specified, `values` can be any shape and will be flattened
before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not given,
both `arr` and `values` are flattened before use.
Returns
-------
out : ndarray
A copy of `arr` with `values` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| gpl-3.0 |
taxipp/ipp-macro-series-parser | ipp_macro_series_parser/demographie/dependance.py | 2 | 2651 | # -*- coding:utf-8 -*-
from __future__ import division
import logging
import numpy as np
import os
import pandas as pd
from liam2.importer import array_to_disk_array
from ipp_macro_series_parser.scripts.utils import line_prepender
log = logging.getLogger(__name__)
def check_directory_existence(directory):
if not os.path.exists(directory):
log.info('Creating directory {}'.format(directory))
os.makedirs(directory)
def build_prevalence_2010(input_dir = None, output_dir = None, uniform_weight = None,
drees_filename = 'dss43_horizon_2060.xls', output_filename = 'dependance_prevalence_2010.csv'):
data_path = os.path.join(input_dir, drees_filename)
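    # Read the DREES projection sheet 'Tab2': header on the fourth row,
    # columns B to O, dropping the four footer rows.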
data = pd.read_excel(
data_path,
        sheetname = 'Tab2',
header = 3,
parse_cols = 'B:O',
skip_footer = 4
)
columns_to_delete = [
column for column in data.columns if column.startswith('Unnamed') or column.startswith('Ensemble')]
for column in columns_to_delete:
del data[column]
data.index = [index.year for index in data.index]
data.columns = range(1, 7)
check_directory_existence(output_dir)
csv_file_path = os.path.join(output_dir, output_filename)
data = pd.DataFrame(data.xs(2010)).T
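    # Divide the 2010 counts by the uniform weight and round to integers.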
data = np.round(data / uniform_weight)
data.astype(int).to_csv(csv_file_path, index = False)
line_prepender(csv_file_path, 'age_category')
def build_prevalence_all_years(globals_node = None, output_dir = None, input_dir = None, to_csv = None,
drees_filename = 'dss43_horizon_2060.xls'):
assert globals_node or to_csv
data_path = os.path.join(
input_dir,
drees_filename)
data = pd.read_excel(
data_path,
        sheetname = 'Tab6A',
header = 3,
parse_cols = 'B:E',
skip_footer = 3
)
# "Au 1er janvier"
data.columns = ['year', 'dependants_optimiste', 'DEPENDANTS', 'dependants_pessimiste']
data.set_index('year', inplace = True)
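    # Reindex on every year from 2010 to 2060 and fill the gaps between the
    # published points by order-7 polynomial interpolation.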
data = data.reindex(index = range(2010, 2061)).interpolate(method = 'polynomial', order = 7)
    data.index = [int(year) - 1 for year in data.index]
data.index.name = "PERIOD"
if globals_node:
array_to_disk_array(
globals_node,
'dependance_prevalence_all_years',
data.DEPENDANTS.values
)
elif to_csv:
check_directory_existence(output_dir)
csv_file_path = os.path.join(output_dir, 'dependance_prevalence_all_years.csv')
data = data.reset_index()[['PERIOD', 'DEPENDANTS']]
data.astype(int) \
.to_csv(csv_file_path, index = False)
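# Illustrative usage sketch (the paths and the weight below are placeholders,
# not values shipped with this module):
#
#     build_prevalence_2010(input_dir='/path/to/drees', output_dir='/tmp/out',
#                           uniform_weight=200)
#     build_prevalence_all_years(to_csv=True, input_dir='/path/to/drees',
#                                output_dir='/tmp/out')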
| gpl-3.0 |
aminert/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
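# The grid is subsampled by a factor of 5 to keep the KDE evaluation cheap.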
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
| bsd-3-clause |
zuku1985/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 33 | 20167 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering on a precomputed distances matrix
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
        # With a specified number of clusters, just for the sake of
        # raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering_wrong_arg_memory():
    # Test that an error is raised when memory is neither a str
    # nor a joblib.Memory instance
rng = np.random.RandomState(0)
n_samples = 100
X = rng.randn(n_samples, 50)
memory = 5
clustering = AgglomerativeClustering(memory=memory)
assert_raises(ValueError, clustering.fit, X)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that a connectivity matrix of the wrong shape raises a
        # ValueError
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
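        # one-hot encode the labels; dot(ecut, ecut.T) is the co-clustering
        # matrix, with entry (i, j) == 1 iff samples i and j share a cluster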
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
    # Test that return_distance=True gives the same output for both
    # structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Non-regression check: a connectivity matrix that does not support item
    # assignment and has more than one connected component must not crash.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
    # When n_clusters is small, the full tree should be built,
    # that is, the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
    # When n_clusters is large (greater than max(100, 0.02 * n_samples)),
    # we should stop as soon as n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
pizzathief/numpy | numpy/lib/polynomial.py | 4 | 40727 | """
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import functools
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.core import overrides
from numpy.core.overrides import set_module
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
@set_module('numpy')
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def _poly_dispatcher(seq_of_zeros):
return seq_of_zeros
@array_function_dispatch(_poly_dispatcher)
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Compute polynomial values.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
    .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1., 0., 0., 0.])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) # random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
        # if the roots occur in complex-conjugate pairs, the coefficients are real.
roots = NX.asarray(seq_of_zeros, complex)
if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
a = a.real.copy()
return a
def _roots_dispatcher(p):
return p
@array_function_dispatch(_roots_dispatcher)
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Compute polynomial values.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if p.ndim != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def _polyint_dispatcher(p, m=None, k=None):
return (p,)
@array_function_dispatch(_polyint_dispatcher)
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to integrate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def _polyder_dispatcher(p, m=None):
return (p,)
@array_function_dispatch(_polyder_dispatcher)
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
return (x, y, w)
@array_function_dispatch(_polyfit_dispatcher)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error in the order `deg`, `deg-1`, ... `0`.
The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
method is recommended for new code as it is more stable numerically. See
the documentation of the method for more information.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
Weights to apply to the y-coordinates of the sample points. For
gaussian uncertainties, use 1/sigma (not 1/sigma**2).
cov : bool or str, optional
If given and not `False`, return not just the estimate but also its
        covariance matrix. By default, the covariance is scaled by
        chi2/dof, where dof = N - (deg + 1), i.e., the weights are presumed
        to be unreliable except in a relative sense and everything is scaled
        such that the reduced chi2 is unity. This scaling is omitted if
        ``cov='unscaled'``, as is relevant for the case that the weights are
        w = 1/sigma, with sigma known to be a reliable estimate of the
        uncertainty.
Returns
-------
p : ndarray, shape (deg + 1,) or (deg + 1, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Compute polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
https://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
https://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> import warnings
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179 # may vary
>>> p(3.5)
-0.34732142857143039 # may vary
>>> p(10)
22.579365079365115 # may vary
High-order polynomials may oscillate wildly:
>>> with warnings.catch_warnings():
... warnings.simplefilter('ignore', np.RankWarning)
... p30 = np.poly1d(np.polyfit(x, y, 30))
...
>>> p30(4)
-0.80000000000000204 # may vary
>>> p30(5)
-0.99999999999999445 # may vary
>>> p30(4.5)
-0.10547061179440398 # may vary
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning, stacklevel=4)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
if cov == "unscaled":
fac = 1
else:
if len(x) <= order:
raise ValueError("the number of data points must exceed order "
"to scale the covariance matrix")
# note, this used to be: fac = resids / (len(x) - order - 2.0)
            # it was decided that the "- 2" (originally justified by "Bayesian
            # uncertainty analysis") is not what the user expects
# (see gh-11196 and gh-11197)
fac = resids / (len(x) - order)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
def _polyval_dispatcher(p, x):
return (p, x)
@array_function_dispatch(_polyval_dispatcher)
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, an array of numbers, or an instance of poly1d, at
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
If `x` is a subtype of `ndarray` the return value will be of the same type.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asanyarray(x)
y = NX.zeros_like(x)
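    # Horner's scheme: fold in one coefficient per iteration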
for i in range(len(p)):
y = y * x + p[i]
return y
def _binary_op_dispatcher(a1, a2):
return (a1, a2)
@array_function_dispatch(_binary_op_dispatcher)
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print(p1)
1 x + 2
>>> print(p2)
2
9 x + 5 x + 4
>>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
@array_function_dispatch(_binary_op_dispatcher)
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
@array_function_dispatch(_binary_op_dispatcher)
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print(p1)
2
1 x + 2 x + 3
>>> print(p2)
2
9 x + 5 x + 1
>>> print(np.polymul(p1, p2))
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def _polydiv_dispatcher(u, v):
return (u, v)
@array_function_dispatch(_polydiv_dispatcher)
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([1.5 , 1.75]), array([0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.astype(w.dtype)
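    # synthetic (long) division: peel off one quotient coefficient per step
    # and subtract its contribution from the running remainder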
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
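    # Used by poly1d.__str__: rewrite the 'x**n' markers in `astr` so that
    # each exponent is printed on a line raised above its term, wrapping
    # long polynomials at roughly `wrap` characters.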
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
@set_module('numpy')
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1., -3., 2.])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
__hash__ = None
@property
def coeffs(self):
""" The polynomial coefficients """
return self._coeffs
@coeffs.setter
def coeffs(self, value):
# allowing this makes p.coeffs *= 2 legal
if value is not self._coeffs:
raise AttributeError("Cannot set attribute")
@property
def variable(self):
""" The name of the polynomial variable """
return self._variable
# calculated attributes
@property
def order(self):
""" The order or degree of the polynomial """
return len(self._coeffs) - 1
@property
def roots(self):
""" The roots of the polynomial, where self(x) == 0 """
return roots(self._coeffs)
    # our internal _coeffs property needs to be backed by __dict__['coeffs'] for
# scipy to work correctly.
@property
def _coeffs(self):
return self.__dict__['coeffs']
@_coeffs.setter
def _coeffs(self, coeffs):
self.__dict__['coeffs'] = coeffs
# alias attributes
r = roots
c = coef = coefficients = coeffs
o = order
def __init__(self, c_or_r, r=False, variable=None):
if isinstance(c_or_r, poly1d):
self._variable = c_or_r._variable
self._coeffs = c_or_r._coeffs
if set(c_or_r.__dict__) - set(self.__dict__):
msg = ("In the future extra properties will not be copied "
"across when constructing one poly1d from another")
warnings.warn(msg, FutureWarning, stacklevel=2)
self.__dict__.update(c_or_r.__dict__)
if variable is not None:
self._variable = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if c_or_r.ndim > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self._coeffs = c_or_r
if variable is None:
variable = 'x'
self._variable = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
return not self.__eq__(other)
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self._coeffs = NX.concatenate((zr, self.coeffs))
ind = 0
self._coeffs[ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
| bsd-3-clause |
xunyou/vincent | vincent/core.py | 1 | 13550 | # -*- coding: utf-8 -*-
"""
Core: The core functionality for Vincent to map to Vega grammar
"""
from __future__ import (print_function, division)
import json
from string import Template
from pkg_resources import resource_string
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
from ._compat import str_types
def initialize_notebook():
"""Initialize the IPython notebook display elements"""
try:
from IPython.core.display import display, HTML
except ImportError:
print("IPython Notebook could not be loaded.")
# Thanks to @jakevdp:
# https://github.com/jakevdp/mpld3/blob/master/mpld3/_display.py#L85
load_lib = """
function vct_load_lib(url, callback){
if(typeof d3 !== 'undefined' &&
url === 'http://d3js.org/d3.v3.min.js'){
callback()
}
var s = document.createElement('script');
s.src = url;
s.async = true;
s.onreadystatechange = s.onload = callback;
s.onerror = function(){
console.warn("failed to load library " + url);
};
document.getElementsByTagName("head")[0].appendChild(s);
};
var vincent_event = new CustomEvent(
"vincent_libs_loaded",
{bubbles: true, cancelable: true}
);
"""
lib_urls = [
"'http://d3js.org/d3.v3.min.js'",
"'http://d3js.org/d3.geo.projection.v0.min.js'",
"'http://wrobstory.github.io/d3-cloud/d3.layout.cloud.js'",
"'http://wrobstory.github.io/vega/vega.v1.3.3.js'"
]
get_lib = """vct_load_lib(%s, function(){
%s
});"""
load_js = get_lib
ipy_trigger = "window.dispatchEvent(vincent_event);"
for elem in lib_urls[:-1]:
load_js = load_js % (elem, get_lib)
load_js = load_js % (lib_urls[-1], ipy_trigger)
html = """
<script>
%s
function load_all_libs(){
console.log('Loading Vincent libs...')
%s
};
if(typeof define === "function" && define.amd){
if (window['d3'] === undefined ||
window['topojson'] === undefined){
require.config(
{paths: {
d3: 'http://d3js.org/d3.v3.min',
topojson: 'http://d3js.org/topojson.v1.min'
}
}
);
require(["d3"], function(d3){
console.log('Loading Vincent from require.js...')
window.d3 = d3;
require(["topojson"], function(topojson){
window.topojson = topojson;
load_all_libs();
});
});
} else {
load_all_libs();
};
}else{
console.log('Require.js not found, loading manually...')
load_all_libs();
};
</script>""" % (load_lib, load_js,)
return display(HTML(html))
def _assert_is_type(name, value, value_type):
"""Assert that a value must be a given type."""
if not isinstance(value, value_type):
if type(value_type) is tuple:
types = ', '.join(t.__name__ for t in value_type)
raise ValueError('{0} must be one of ({1})'.format(name, types))
else:
raise ValueError('{0} must be {1}'
.format(name, value_type.__name__))
class ValidationError(Exception):
"""Exception raised with validation fails
This exception is raised only when the ``validate`` functions of classes
that inherit from ``FieldClass`` are called. It implies that the classes
do not contain valid Vega JSON."""
pass
class KeyedList(list):
"""A list that can optionally be indexed by the ``name`` attribute of
its elements"""
def __init__(self, attr_name='name', *args, **kwargs):
self.attr_name = attr_name
list.__init__(self, *args, **kwargs)
def get_keys(self):
keys = [getattr(x, self.attr_name) for x in self]
if len(keys) != len(set(keys)):
raise ValidationError('duplicate keys found')
return keys
def __getitem__(self, key):
if isinstance(key, str_types):
keys = self.get_keys()
if key not in keys:
raise KeyError(' "{0}" is an invalid key'.format(key))
else:
return self[keys.index(key)]
else:
return list.__getitem__(self, key)
def __delitem__(self, key):
if isinstance(key, str_types):
keys = self.get_keys()
if key not in keys:
raise KeyError(' "{0}" is an invalid key'.format(key))
else:
list.__delitem__(self, keys.index(key))
else:
return list.__delitem__(self, key)
def __setitem__(self, key, value):
if isinstance(key, str_types):
if not hasattr(value, self.attr_name):
raise ValidationError(
'object must have ' + self.attr_name + ' attribute')
elif getattr(value, self.attr_name) != key:
raise ValidationError(
"key must be equal to '" + self.attr_name +
"' attribute")
keys = self.get_keys()
if key not in keys:
self.append(value)
else:
list.__setitem__(self, keys.index(key), value)
else:
list.__setitem__(self, key, value)
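# Illustrative sketch of KeyedList usage (the ``Named`` helper class below is
# hypothetical -- any object exposing a ``name`` attribute works):
#
#     >>> class Named(object):
#     ...     def __init__(self, name):
#     ...         self.name = name
#     >>> kl = KeyedList()
#     >>> kl.append(Named('width'))
#     >>> kl['width'] is kl[0]
#     True
#     >>> kl['width'] = Named('width')   # replaces the entry with that name
#     >>> del kl['width']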
def grammar(grammar_type=None, grammar_name=None):
"""Decorator to define properties that map to the ``grammar``
dict. This dict is the canonical representation of the Vega grammar
within Vincent.
This decorator is intended for classes that map to some pre-defined JSON
structure, such as axes, data, marks, scales, etc. It is assumed that this
decorates functions with an instance of ``self.grammar``.
Parameters
----------
grammar_type : type or tuple of types, default None
If the argument to the decorated function is not of the given types,
then a ValueError is raised. No type checking is done if the type is
None (default).
grammar_name : string, default None
An optional name to map to the internal ``grammar`` dict. If None
(default), then the key for the dict is the name of the function
being decorated. If not None, then it will be the name specified
here. This is useful if the expected JSON field name is a Python
keyword or has an un-Pythonic name.
This should decorate a "validator" function that should return no value
but raise an exception if the provided value is not valid Vega grammar. If
the validator throws no exception, then the value is assigned to the
``grammar`` dict.
The validator function should take only one argument - the value to be
validated - so that no ``self`` argument is included; the validator
should not modify the class.
    If no arguments are given, then no type-checking is done and the property
will be mapped to a field with the name of the decorated function.
    The doc string for the property is taken from the validator function's
doc string.
"""
def grammar_creator(validator, name):
def setter(self, value):
if isinstance(grammar_type, (type, tuple)):
_assert_is_type(validator.__name__, value, grammar_type)
validator(value)
self.grammar[name] = value
def getter(self):
return self.grammar.get(name, None)
def deleter(self):
if name in self.grammar:
del self.grammar[name]
return property(getter, setter, deleter, validator.__doc__)
if isinstance(grammar_type, (type, tuple)):
# If grammar_type is a type, return another decorator.
def grammar_dec(validator):
# Make sure to use the grammar name if it's there.
if grammar_name:
return grammar_creator(validator, grammar_name)
else:
return grammar_creator(validator, validator.__name__)
return grammar_dec
elif isinstance(grammar_name, str_types):
# If grammar_name is a string, use that name and return another
# decorator.
def grammar_dec(validator):
return grammar_creator(validator, grammar_name)
return grammar_dec
else:
# Otherwise we assume that grammar_type is actually the function being
# decorated.
return grammar_creator(grammar_type, grammar_type.__name__)
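# Illustrative sketch of how the decorator is meant to be used on a
# GrammarClass subclass (the ``Example`` class and ``name`` field below are
# hypothetical, not part of the library's API):
#
#     class Example(GrammarClass):
#         @grammar(str_types)
#         def name(value):
#             """Name of the example object"""
#             if not value:
#                 raise ValueError('name cannot be empty')
#
#     ex = Example()
#     ex.name = 'foo'    # type-checked, validated, stored in ex.grammar['name']
#     del ex.name        # removes the 'name' key from ex.grammar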
class GrammarDict(dict):
"""The Vega Grammar. When called, obj.grammar returns a Python data
structure for the Vega Grammar. When printed, obj.grammar returns a
string representation."""
def __init__(self, *args, **kwargs):
"""Standard Dict init"""
dict.__init__(self, *args, **kwargs)
def encoder(self, obj):
"""Encode grammar objects for each level of hierarchy"""
if hasattr(obj, 'grammar'):
return obj.grammar
def __call__(self):
"""When called, return the Vega grammar as a Python data structure."""
return json.loads(json.dumps(self, default=self.encoder))
def __str__(self):
"""String representation of Vega Grammar"""
return json.dumps(self, default=self.encoder)
class GrammarClass(object):
"""Base class for objects that rely on an internal ``grammar`` dict. This
dict contains the complete Vega grammar.
This should be used as a superclass for classes that map to some JSON
structure. The JSON content is stored in an internal dict named
``grammar``.
"""
def __init__(self, **kwargs):
"""Initialize a GrammarClass
**kwargs are attribute-value pairs that are set on initialization.
These will generally be keys for the ``grammar`` dict. If the
attribute does not already exist as a property, then a
``ValueError`` is raised.
"""
self.grammar = GrammarDict()
for attr, value in sorted(kwargs.items()):
if hasattr(self, attr):
setattr(self, attr, value)
else:
raise ValueError('unknown keyword argument ' + attr)
def validate(self):
"""Validate the contents of the object.
This calls ``setattr`` for each of the class's grammar properties. It
will catch ``ValueError``s raised by the grammar property's setters
and re-raise them as :class:`ValidationError`.
"""
for key, val in self.grammar.items():
try:
setattr(self, key, val)
except ValueError as e:
raise ValidationError('invalid contents: ' + e.args[0])
def to_json(self, path=None, html_out=False,
html_path='vega_template.html', validate=False,
pretty_print=True):
"""Convert object to JSON
Parameters
----------
path: string, default None
Path to write JSON out. If there is no path provided, JSON
will be returned as a string to the console.
html_out: boolean, default False
            If True, vincent will output a simple HTML scaffold to
visualize the vega json output.
html_path: string, default 'vega_template.html'
Path for the html file (if html_out=True)
validate : boolean
If True, call the object's `validate` method before
serializing. Default is False.
pretty_print : boolean
If True (default), JSON is printed in more-readable form with
indentation and spaces.
Returns
-------
string
JSON serialization of the class's grammar properties.
"""
if validate:
self.validate()
if pretty_print:
dumps_args = {'indent': 2, 'separators': (',', ': ')}
else:
dumps_args = {}
def encoder(obj):
if hasattr(obj, 'grammar'):
return obj.grammar
if html_out:
template = Template(
str(resource_string('vincent', 'vega_template.html')))
with open(html_path, 'w') as f:
f.write(template.substitute(path=path))
if path:
with open(path, 'w') as f:
json.dump(self.grammar, f, default=encoder, sort_keys=True,
**dumps_args)
else:
return json.dumps(self.grammar, default=encoder, sort_keys=True,
**dumps_args)
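    # Illustrative call (file names are hypothetical; any GrammarClass
    # subclass instance can stand in for ``vis``):
    #
    #     vis.to_json('vega.json', html_out=True, html_path='chart.html')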
def from_json(self):
"""Load object from JSON
Not yet implemented.
"""
raise NotImplementedError()
class LoadError(Exception):
"""Exception for errors on loading data from third-party objects"""
pass
| mit |
uvacorpnet/transparent_nederlands | extract-organizations.py | 1 | 5108 | # Code to extract organization names from the 'waarde' field of Parlement.com position data
# @author Frank Takes - [email protected]
# @dependencies: Python 2, Pandas
# @run code using: python extract-organizations.py details.csv
# before running, convert the original file once with: iconv -f utf8 -t ascii//TRANSLIT originalfile > details.csv
# (this replaces every accented or umlauted vowel with its plain ASCII equivalent)
import sys
import pandas as pd
import string
from unidecode import unidecode
# some custom "dictionaries"
positions = [
' lid', '^lid', 'vicevoorzitter', 'vice voorzitter', 'vicepresident', 'plaatsvervangend voorzitter', 'algemeen voorzitter', 'voorzitter', 'columnist', 'permanent vertegenwoordiger', 'secretaris', 'bedrijfs economisch medewerker', 'wetenschappelijk medewerker', 'medewerker', 'medewerkster', 'penningmeester',
'vertegenwoordiger', 'medewerker', 'concern directeur', 'directeur', 'senior adviseur', 'organisatie adviseur', 'intern adviseur', 'adviseur', 'eindredacteur', 'gastdocent', 'fellow', 'manager', 'officier'
]
bodies = [
'dagelijks bestuur', 'raad van bestuur', 'algemeen bestuur', 'bestuur', 'raad van advies', 'raad van toezicht', 'raad van commissarissen', 'curatorium', 'regiegroep', 'comite van aanbeveling'
]
parties = [
'vvd', 'cda', 'pvda', 'd66', 'christenunie', 'groenlinks', 'lpf',
'jovd', 'jonge socialisten', 'cdja', 'volkspartij', 'kvp', 'arp',
'politiek leider',
'provinciale staten', 'eerste kamer', 'tweede kamer', 'parlement', #'gemeenteraad'
'partijbestuur', 'minister', 'formateur', 'informateur', 'raad van state', 'staatssecretaris',
'ambteloos', 'tijdelijk vervangen'
]
# read the data
df = pd.read_csv(str(sys.argv[1]),sep=",")
df2 = df
# print some stats before and after filtering non "neven" positions
print (df.shape)
df = df[df.rubriek >= 3500]
print (df.shape)
# capitalize some stuff that should be capitalized for proper filtering later
df['waarde'] = df['waarde'].str.replace('gemeente', 'Gemeente', case=True)
df['waarde'] = df['waarde'].str.replace('waarnemend burgemeester', 'Waarnemend Burgemeester', case=True)
df['waarde'] = df['waarde'].str.replace('burgemeester', 'Burgemeester', case=True)
# create a lower case version of the "waarde" column
df['waardelower'] = df['waarde'].str.lower()
# remove forbidded strings related to party/political positions
forbiddenstrings = parties
forbiddenregex = '|'.join(forbiddenstrings)
df = df[df['waardelower'].str.match('(.*(' + forbiddenregex + ').*)').str.len() == 0]
# some stats after filtering out political positions
print ("\n")
print (df.shape)
# filter the type of position. order seems important, "plaatsvervangend voorzitter" will not be filtered if it is preceded by "voorzitter"
typestring = positions + bodies
typeregex = '|'.join(typestring)
df['organisatie'] = df['waarde'].str.replace('(\S*(' + typeregex + ')\S*)', '', case=False)
# organization starts at first occurence of uppercase
df['organisatie'] = df['organisatie'].str.lstrip(string.lowercase + ' ,().')
df['organisatie'] = df['organisatie'].str.replace('(\(|\)|\'|\"|\.|\-|\/)', ' ')
df['waarde'] = df['waarde'].str.replace('(\(|\)|\'|\"|\.|\-|\/)', ' ')
df['waarde'] = df['waarde'].str.replace(' ', ' ')
df['organisatie'] = df['organisatie'].str.replace(' ', ' ')
# remove everything after the comma
def delete_after_comma(x):
    ind = x.find(",")
    if ind > 0:
        return x[:ind]
    else:
        return x
df['organisatie'] = df['organisatie'].str.strip()
# type is whatever remains after removing the previously extracted organization
df['positionbody'] = df.apply(lambda x: (x['waarde'].replace(x['organisatie'],"")).lower(), axis=1)
df["organisatie"] = df["organisatie"].apply(lambda x: delete_after_comma(x))
df["positionbody"] = df["positionbody"].apply(lambda x: delete_after_comma(x))
df['positionbody'] = df['positionbody'].str.replace('(\(|\)|\'|\"|\,|\.|\-|\/)', ' ')
df['positionbody'] = df['positionbody'].str.strip()
df['positionbody'] = df['positionbody'].str.replace(' ', ' ')
# filter the body by excluding the position
positionstring = positions
positionregex = '|'.join(positionstring)
df['body'] = df['positionbody'].str.replace('(^\S*(' + positionregex + ')\S*)', '', case=False)
# filter the position by excluding the body from the positionbody
df['position'] = df.apply(lambda x: (x['positionbody'].replace(x['body'],"")).lower(), axis=1)
# clean it all
df['body'] = df['body'].str.strip()
df['body'] = df['body'].str.replace(' ', ' ')
df['position'] = df['position'].str.strip()
df['position'] = df['position'].str.replace(' ', ' ')
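# worked example (hypothetical input) of the extraction above:
# waarde 'voorzitter Raad van Toezicht, Stichting Voorbeeld' roughly yields
# organisatie 'Stichting Voorbeeld', positionbody 'voorzitter raad van toezicht',
# position 'voorzitter', body 'raad van toezicht'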
# print some stats
print (df['positionbody'].value_counts()[:40])
print ("\n")
print (df['position'].value_counts()[:40])
print ("\n")
print (df['body'].value_counts()[:40])
print ("\n")
print (df['organisatie'].value_counts()[:40])
print ("\n")
# merge with original dataset again (from which we removed < 3500 and party/political positions)
df = df.combine_first(df2)
# output to csv
df.to_csv('detailsfiltered.csv', sep=',', columns=['b1-nummer', 'rubriek', 'position', 'body', 'positionbody', 'organisatie', 'waarde', 'datum', 'toelichting'], index=False)
| gpl-3.0 |
aleksandr-bakanov/astropy | astropy/stats/spatial.py | 4 | 12862 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements functions and classes for spatial statistics.
"""
import numpy as np
import math
class RipleysKEstimator:
"""
Estimators for Ripley's K function for two-dimensional spatial data.
See [1]_, [2]_, [3]_, [4]_, [5]_ for detailed mathematical and
practical aspects of those estimators.
Parameters
----------
area : float
Area of study from which the points where observed.
x_max, y_max : float, float, optional
Maximum rectangular coordinates of the area of study.
        Required if ``mode == 'translation'`` or ``mode == 'ohser'``.
x_min, y_min : float, float, optional
Minimum rectangular coordinates of the area of study.
        Required if ``mode == 'var-width'`` or ``mode == 'ohser'``.
Examples
--------
>>> import numpy as np
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> from astropy.stats import RipleysKEstimator
>>> z = np.random.uniform(low=5, high=10, size=(100, 2))
>>> Kest = RipleysKEstimator(area=25, x_max=10, y_max=10,
... x_min=5, y_min=5)
>>> r = np.linspace(0, 2.5, 100)
>>> plt.plot(r, Kest.poisson(r)) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='none')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='translation')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='ohser')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='var-width')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='ripley')) # doctest: +SKIP
References
----------
.. [1] Peebles, P.J.E. *The large scale structure of the universe*.
<http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=1980lssu.book.....P&db_key=AST>
.. [2] Spatial descriptive statistics.
<https://en.wikipedia.org/wiki/Spatial_descriptive_statistics>
.. [3] Package spatstat.
<https://cran.r-project.org/web/packages/spatstat/spatstat.pdf>
.. [4] Cressie, N.A.C. (1991). Statistics for Spatial Data,
Wiley, New York.
.. [5] Stoyan, D., Stoyan, H. (1992). Fractals, Random Shapes and
Point Fields, Akademie Verlag GmbH, Chichester.
"""
def __init__(self, area, x_max=None, y_max=None, x_min=None, y_min=None):
self.area = area
self.x_max = x_max
self.y_max = y_max
self.x_min = x_min
self.y_min = y_min
@property
def area(self):
return self._area
@area.setter
def area(self, value):
if isinstance(value, (float, int)) and value > 0:
self._area = value
else:
raise ValueError('area is expected to be a positive number. '
'Got {}.'.format(value))
@property
def y_max(self):
return self._y_max
@y_max.setter
def y_max(self, value):
if value is None or isinstance(value, (float, int)):
self._y_max = value
else:
raise ValueError('y_max is expected to be a real number '
'or None. Got {}.'.format(value))
@property
def x_max(self):
return self._x_max
@x_max.setter
def x_max(self, value):
if value is None or isinstance(value, (float, int)):
self._x_max = value
else:
raise ValueError('x_max is expected to be a real number '
'or None. Got {}.'.format(value))
@property
def y_min(self):
return self._y_min
@y_min.setter
def y_min(self, value):
if value is None or isinstance(value, (float, int)):
self._y_min = value
else:
raise ValueError('y_min is expected to be a real number. '
'Got {}.'.format(value))
@property
def x_min(self):
return self._x_min
@x_min.setter
def x_min(self, value):
if value is None or isinstance(value, (float, int)):
self._x_min = value
else:
raise ValueError('x_min is expected to be a real number. '
'Got {}.'.format(value))
def __call__(self, data, radii, mode='none'):
return self.evaluate(data=data, radii=radii, mode=mode)
def _pairwise_diffs(self, data):
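        # absolute coordinate differences |p_i - p_j| for every unordered pair
        # (i < j), stacked into an (npts*(npts-1)/2, 2) array; the Euclidean
        # pair distances used by the estimators below are np.hypot over the
        # two columns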
npts = len(data)
diff = np.zeros(shape=(npts * (npts - 1) // 2, 2), dtype=np.double)
k = 0
for i in range(npts - 1):
size = npts - i - 1
diff[k:k + size] = abs(data[i] - data[i+1:])
k += size
return diff
def poisson(self, radii):
"""
Evaluates the Ripley K function for the homogeneous Poisson process,
        also known as Complete Spatial Randomness (CSR).
Parameters
----------
radii : 1D array
Set of distances in which Ripley's K function will be evaluated.
Returns
-------
output : 1D array
Ripley's K function evaluated at ``radii``.
"""
return np.pi * radii * radii
def Lfunction(self, data, radii, mode='none'):
"""
Evaluates the L function at ``radii``. For parameter description
see ``evaluate`` method.
"""
return np.sqrt(self.evaluate(data, radii, mode=mode) / np.pi)
def Hfunction(self, data, radii, mode='none'):
"""
Evaluates the H function at ``radii``. For parameter description
see ``evaluate`` method.
"""
return self.Lfunction(data, radii, mode=mode) - radii
def evaluate(self, data, radii, mode='none'):
"""
Evaluates the Ripley K estimator for a given set of values ``radii``.
Parameters
----------
data : 2D array
Set of observed points in as a n by 2 array which will be used to
estimate Ripley's K function.
radii : 1D array
Set of distances in which Ripley's K estimator will be evaluated.
Usually, it's common to consider max(radii) < (area/2)**0.5.
mode : str
Keyword which indicates the method for edge effects correction.
Available methods are 'none', 'translation', 'ohser', 'var-width',
and 'ripley'.
* 'none'
this method does not take into account any edge effects
whatsoever.
* 'translation'
computes the intersection of rectangular areas centered at
the given points provided the upper bounds of the
dimensions of the rectangular area of study. It assumes that
all the points lie in a bounded rectangular region satisfying
x_min < x_i < x_max; y_min < y_i < y_max. A detailed
description of this method can be found on ref [4].
* 'ohser'
this method uses the isotropized set covariance function of
the window of study as a weight to correct for
edge-effects. A detailed description of this method can be
found on ref [4].
* 'var-width'
this method considers the distance of each observed point to
the nearest boundary of the study window as a factor to
account for edge-effects. See [3] for a brief description of
this method.
* 'ripley'
this method is known as Ripley's edge-corrected estimator.
The weight for edge-correction is a function of the
proportions of circumferences centered at each data point
which crosses another data point of interest. See [3] for
a detailed description of this method.
Returns
-------
ripley : 1D array
Ripley's K function estimator evaluated at ``radii``.
"""
data = np.asarray(data)
if not data.shape[1] == 2:
raise ValueError('data must be an n by 2 array, where n is the '
'number of observed points.')
npts = len(data)
ripley = np.zeros(len(radii))
if mode == 'none':
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
for r in range(len(radii)):
ripley[r] = (distances < radii[r]).sum()
ripley = self.area * 2. * ripley / (npts * (npts - 1))
# eq. 15.11 Stoyan book page 283
elif mode == 'translation':
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
intersec_area = (((self.x_max - self.x_min) - diff[:, 0]) *
((self.y_max - self.y_min) - diff[:, 1]))
for r in range(len(radii)):
dist_indicator = distances < radii[r]
ripley[r] = ((1 / intersec_area) * dist_indicator).sum()
ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley
# Stoyan book page 123 and eq 15.13
elif mode == 'ohser':
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
a = self.area
b = max((self.y_max - self.y_min) / (self.x_max - self.x_min),
(self.x_max - self.x_min) / (self.y_max - self.y_min))
x = distances / math.sqrt(a / b)
u = np.sqrt((x * x - 1) * (x > 1))
v = np.sqrt((x * x - b ** 2) * (x < math.sqrt(b ** 2 + 1)) * (x > b))
c1 = np.pi - 2 * x * (1 + 1 / b) + x * x / b
c2 = 2 * np.arcsin((1 / x) * (x > 1)) - 1 / b - 2 * (x - u)
c3 = (2 * np.arcsin(((b - u * v) / (x * x))
* (x > b) * (x < math.sqrt(b ** 2 + 1)))
+ 2 * u + 2 * v / b - b - (1 + x * x) / b)
cov_func = ((a / np.pi) * (c1 * (x >= 0) * (x <= 1)
+ c2 * (x > 1) * (x <= b)
+ c3 * (b < x) * (x < math.sqrt(b ** 2 + 1))))
for r in range(len(radii)):
dist_indicator = distances < radii[r]
ripley[r] = ((1 / cov_func) * dist_indicator).sum()
ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley
# Cressie book eq 8.2.20 page 616
elif mode == 'var-width':
lt_dist = np.minimum(np.minimum(self.x_max - data[:, 0], self.y_max - data[:, 1]),
np.minimum(data[:, 0] - self.x_min, data[:, 1] - self.y_min))
for r in range(len(radii)):
for i in range(npts):
for j in range(npts):
if i != j:
diff = abs(data[i] - data[j])
dist = math.sqrt((diff * diff).sum())
if dist < radii[r] < lt_dist[i]:
ripley[r] = ripley[r] + 1
lt_dist_sum = (lt_dist > radii[r]).sum()
if not lt_dist_sum == 0:
ripley[r] = ripley[r] / lt_dist_sum
ripley = self.area * ripley / npts
# Cressie book eq 8.4.22 page 640
elif mode == 'ripley':
hor_dist = np.zeros(shape=(npts * (npts - 1)) // 2,
dtype=np.double)
ver_dist = np.zeros(shape=(npts * (npts - 1)) // 2,
dtype=np.double)
for k in range(npts - 1):
min_hor_dist = min(self.x_max - data[k][0],
data[k][0] - self.x_min)
min_ver_dist = min(self.y_max - data[k][1],
data[k][1] - self.y_min)
start = (k * (2 * (npts - 1) - (k - 1))) // 2
end = ((k + 1) * (2 * (npts - 1) - k)) // 2
hor_dist[start: end] = min_hor_dist * np.ones(npts - 1 - k)
ver_dist[start: end] = min_ver_dist * np.ones(npts - 1 - k)
diff = self._pairwise_diffs(data)
dist = np.hypot(diff[:, 0], diff[:, 1])
dist_ind = dist <= np.hypot(hor_dist, ver_dist)
w1 = (1 - (np.arccos(np.minimum(ver_dist, dist) / dist) +
np.arccos(np.minimum(hor_dist, dist) / dist)) / np.pi)
w2 = (3 / 4 - 0.5 * (
np.arccos(ver_dist / dist * ~dist_ind) +
np.arccos(hor_dist / dist * ~dist_ind)) / np.pi)
weight = dist_ind * w1 + ~dist_ind * w2
for r in range(len(radii)):
ripley[r] = ((dist < radii[r]) / weight).sum()
ripley = self.area * 2. * ripley / (npts * (npts - 1))
else:
raise ValueError(f'mode {mode} is not implemented.')
return ripley
| bsd-3-clause |
mfarhan12/pyrf | examples/calculate_channel_power.py | 1 | 1477 | #!/usr/bin/env python
from pyrf.devices.thinkrf import WSA
from pyrf.sweep_device import SweepDevice
from pyrf.numpy_util import calculate_channel_power
import sys
import time
import math
from matplotlib.pyplot import plot, figure, axis, xlabel, ylabel, show
import numpy as np
def smooth(values, degree=1):
    # simple VBW-style smoothing: replace each point by the mean of its
    # neighbours, but leave points near the spectral peaks untouched
    degree = int(degree)  # the caller may pass a float ratio; slices need ints
    new_list = []
    # threshold: mean of the top ~0.5% of values, plus a 5 dB margin
    list_average = np.mean(sorted(values)[int(0.995 * len(values)):-1]) + 5
    for n in range(len(values)):
        start = max(0, n - degree)
        stop = min(len(values), n + degree)
        points = values[start:stop]
        if values[n] > list_average:
            new_list.append(values[n])
        else:
            new_list.append(np.mean(points))
    return new_list
# declare sweep constants
START_FREQ = 50e6
STOP_FREQ = 27e9
RBW = 100e3
VBW = 30e3
# connect to wsa
dut = WSA()
dut.connect(sys.argv[1])
dut.request_read_perm()
# declare sweep device
sd = SweepDevice(dut)
# read the spectral data
fstart, fstop, spectra_data = sd.capture_power_spectrum(START_FREQ, STOP_FREQ, RBW,
{'attenuator':0})
# apply the VBW smoothing algorithm
spectra_data = smooth(spectra_data, max(1, RBW/VBW))
# calculate the channel power
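# (spectra_data is in dB: 10**(dB/20) squared equals 10**(dB/10), so the sum
# below adds the per-bin linear powers before converting back to dB)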
linear = np.power(10, np.divide(spectra_data,20))
channel_power = 10 * np.log10(np.sum(np.square(linear)))
print(channel_power)
fig = figure(1)
xvalues = np.linspace(fstart, fstop, len(spectra_data))
xlabel("Frequency")
ylabel("Amplitude")
# plot the power spectrum
plot(xvalues, spectra_data, color='blue')
# show graph
show()
| bsd-3-clause |
daniel20162016/my-first | read_xml_all/calcul_matrix_je_le_qui_dans_de_192_matrix_compare_df_good.py | 2 | 34715 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:45:22 2016
@author: wang
"""
#from matplotlib import pylab as plt
#from numpy import fft, fromstring, int16, linspace
#import wave
from read_wav_xml_good_1 import*
from matrix_24_2 import*
from max_matrix_norm import*
import numpy as np
# open a wave file
#filename = 'francois_filon_pure_3.wav'
#filename_1 ='francois_filon_pure_3.xml'
#word ='je'
#word_2='le'
#word_3='qui'
#word_4='dans'
#word_5='de'
def calcul_matrix_je_le_qui_dans_de_192_matrix_compare_df_good(filename, filename_1,word,word_2,word_3,word_4,word_5):
#==============================================================================
    # this is the part for the 'je' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
je_compare_1 = matrix_all_step_new_1
je_compare_2 = matrix_all_step_new_2
je_compare_3 = matrix_all_step_new_3
je_compare_4 = matrix_all_step_new_4
je_compare_5 = matrix_all_step_new_5
#==============================================================================
    # # this is the part for the 'je' end
#==============================================================================
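    #==============================================================================
    # minimal sketch (not part of the original script) of the computation each
    # block in this function repeats: 8 windows of t_step samples, hopped by
    # t_entre_step samples from a word's start point, 24 normalised spectral
    # values per window, concatenated into one 192-element feature vector
    #
    # def feature_vector_192(signal, start, fs, t_step=1920, t_entre_step=1440):
    #     vec = np.zeros(192)
    #     for i in range(8):
    #         begin = start + t_entre_step * i - 1
    #         window = signal[begin:begin + t_step + 1]
    #         x, _, _ = matrix_24_2(window, fs)
    #         vec[24 * i:24 * (i + 1)] = max_matrix_norm(x)
    #     return vec
    #==============================================================================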
#np.savez('je_le_qui_dans_de_192_matrix.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)
    #==============================================================================
    # # # # # # # # tomorrow, I continue here
    #==============================================================================
    # this is the part for the 'le' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word_2)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
le_compare_1 = matrix_all_step_new_1
le_compare_2 = matrix_all_step_new_2
le_compare_3 = matrix_all_step_new_3
le_compare_4 = matrix_all_step_new_4
le_compare_5 = matrix_all_step_new_5
#==============================================================================
    # # this is the part for the 'le' end
#==============================================================================
#==============================================================================
    # this is the part for the 'qui' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word_3)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
qui_compare_1 = matrix_all_step_new_1
qui_compare_2 = matrix_all_step_new_2
qui_compare_3 = matrix_all_step_new_3
qui_compare_4 = matrix_all_step_new_4
qui_compare_5 = matrix_all_step_new_5
#==============================================================================
    # this is the part for the 'dans' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word_4)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
dans_compare_1 = matrix_all_step_new_1
dans_compare_2 = matrix_all_step_new_2
dans_compare_3 = matrix_all_step_new_3
dans_compare_4 = matrix_all_step_new_4
dans_compare_5 = matrix_all_step_new_5
#==============================================================================
    # this is the part for the 'de' start
#==============================================================================
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word_5)
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
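# t_step is the analysis window length in samples; t_entre_step is the hop
# between the eight successive windows extracted for each word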
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part computes the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
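# 192 = 8 windows x 24 normalized band values; entries 0-23 come from the
# window starting at the word onset, the remaining windows are filled below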
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFT features of the following windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part computes the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFT features of the following windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part computes the 3rd matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFT features of the following windows
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part computes the 4th matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFT features of the following windows
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#==============================================================================
# this part computes the 5th matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the remaining columns hold the FFT features of the following windows
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
de_compare_1 = matrix_all_step_new_1
de_compare_2 = matrix_all_step_new_2
de_compare_3 = matrix_all_step_new_3
de_compare_4 = matrix_all_step_new_4
de_compare_5 = matrix_all_step_new_5
#==============================================================================
# # save the comparison matrices of all five words to disk
#==============================================================================
np.savez('je_le_qui_dans_de_192_matrix_compare.npz',je_compare_1,je_compare_2,je_compare_3,je_compare_4,je_compare_5,le_compare_1,le_compare_2,le_compare_3,le_compare_4,le_compare_5,qui_compare_1,qui_compare_2,qui_compare_3,qui_compare_4,qui_compare_5,dans_compare_1,dans_compare_2,dans_compare_3,dans_compare_4,dans_compare_5,de_compare_1,de_compare_2,de_compare_3,de_compare_4,de_compare_5)
finish_2=1
return finish_2 | mit |
darwinex/DarwinexLabs | tools/dwx_zeromq_connector/v2.0.1/EXAMPLES/TEMPLATE/STRATEGIES/coin_flip_traders_v1.0.py | 1 | 9700 | # -*- coding: utf-8 -*-
"""
coin_flip_traders.py
An example trading strategy created using the Darwinex ZeroMQ Connector
for Python 3 and MetaTrader 4.
Source code:
https://github.com/darwinex/DarwinexLabs/tree/master/tools/dwx_zeromq_connector
The strategy launches 'n' threads (each representing a trader responsible
for trading one instrument)
Each trader must:
1) Execute a maximum of 1 trade at any given time.
2) Close existing trades after they have been in execution for
5 seconds.
    3) Flip a coin - random.getrandbits(1) - to decide on a BUY or SELL
4) Keep trading until the market is closed (_market_open = False)
--
@author: Darwinex Labs (www.darwinex.com)
Copyright (c) 2019 onwards, Darwinex. All rights reserved.
Licensed under the BSD 3-Clause License, you may not use this file except
in compliance with the License.
You may obtain a copy of the License at:
https://opensource.org/licenses/BSD-3-Clause
"""
import os
#############################################################################
#############################################################################
_path = '<PATH_TO_ROOT_DIR_CONTAINING_DWX_ZEROMQ_CONNECTOR>'
os.chdir(_path)
#############################################################################
#############################################################################
from EXAMPLES.TEMPLATE.STRATEGIES.BASE.DWX_ZMQ_Strategy import DWX_ZMQ_Strategy
from pandas import Timedelta, to_datetime
from threading import Thread, Lock
from time import sleep
import random
class coin_flip_traders(DWX_ZMQ_Strategy):
def __init__(self, _name="COIN_FLIP_TRADERS",
_symbols=[('EURUSD',0.01),
('AUDNZD',0.01),
('GBPUSD',0.01),
('USDJPY',0.01),
('AUDUSD',0.01),
('XTIUSD',0.01),
('GBPJPY',0.01),
('NZDCHF',0.01),
('EURCAD',0.01)],
_delay=0.1,
_broker_gmt=3,
_verbose=False,
_max_trades=1,
_close_t_delta=5):
super().__init__(_name,
_symbols,
_broker_gmt,
_verbose)
# This strategy's variables
self._traders = []
self._market_open = True
self._max_trades = _max_trades
self._close_t_delta = _close_t_delta
self._delay = _delay
self._verbose = _verbose
# lock for acquire/release of ZeroMQ connector
self._lock = Lock()
##########################################################################
def _run_(self):
"""
Logic:
For each symbol in self._symbols:
1) Open a new Market Order every 2 seconds
2) Close any orders that have been running for 10 seconds
3) Calculate Open P&L every second
4) Plot Open P&L in real-time
5) Lot size per trade = 0.01
6) SL/TP = 10 pips each
"""
# Launch traders!
for _symbol in self._symbols:
_t = Thread(name="{}_Trader".format(_symbol[0]),
target=self._trader_, args=(_symbol,self._max_trades))
_t.daemon = True
_t.start()
print('[{}_Trader] Alright, here we go.. Gerrrronimooooooooooo! ..... xD'.format(_symbol[0]))
self._traders.append(_t)
print('\n\n+--------------+\n+ LIVE UPDATES +\n+--------------+\n')
# _verbose can print too much information.. so let's start a thread
# that prints an update for instructions flowing through ZeroMQ
self._updater_ = Thread(name='Live_Updater',
target=self._updater_,
args=(self._delay,))
self._updater_.daemon = True
self._updater_.start()
##########################################################################
def _updater_(self, _delay=0.1):
while self._market_open:
try:
# Acquire lock
self._lock.acquire()
print('\r{}'.format(str(self._zmq._get_response_())), end='', flush=True)
finally:
# Release lock
self._lock.release()
sleep(self._delay)
##########################################################################
def _trader_(self, _symbol, _max_trades):
# Note: Just for this example, only the Order Type is dynamic.
_default_order = self._zmq._generate_default_order_dict()
_default_order['_symbol'] = _symbol[0]
_default_order['_lots'] = _symbol[1]
_default_order['_SL'] = _default_order['_TP'] = 100
_default_order['_comment'] = '{}_Trader'.format(_symbol[0])
"""
Default Order:
--
{'_action': 'OPEN',
'_type': 0,
'_symbol': EURUSD,
'_price':0.0,
'_SL': 100, # 10 pips
'_TP': 100, # 10 pips
'_comment': 'EURUSD_Trader',
'_lots': 0.01,
'_magic': 123456}
"""
while self._market_open:
try:
# Acquire lock
self._lock.acquire()
#############################
# SECTION - GET OPEN TRADES #
#############################
_ot = self._reporting._get_open_trades_('{}_Trader'.format(_symbol[0]),
self._delay,
10)
# Reset cycle if nothing received
if self._zmq._valid_response_(_ot) == False:
continue
###############################
# SECTION - CLOSE OPEN TRADES #
###############################
for i in _ot.index:
if abs((Timedelta((to_datetime('now') + Timedelta(self._broker_gmt,'h')) - to_datetime(_ot.at[i,'_open_time'])).total_seconds())) > self._close_t_delta:
_ret = self._execution._execute_({'_action': 'CLOSE',
'_ticket': i,
'_comment': '{}_Trader'.format(_symbol[0])},
self._verbose,
self._delay,
10)
# Reset cycle if nothing received
if self._zmq._valid_response_(_ret) == False:
break
# Sleep between commands to MetaTrader
sleep(self._delay)
##############################
# SECTION - OPEN MORE TRADES #
##############################
if _ot.shape[0] < _max_trades:
# Randomly generate 1 (OP_BUY) or 0 (OP_SELL)
# using random.getrandbits()
_default_order['_type'] = random.getrandbits(1)
# Send instruction to MetaTrader
_ret = self._execution._execute_(_default_order,
self._verbose,
self._delay,
10)
# Reset cycle if nothing received
if self._zmq._valid_response_(_ret) == False:
continue
finally:
# Release lock
self._lock.release()
# Sleep between cycles
sleep(self._delay)
##########################################################################
def _stop_(self):
self._market_open = False
for _t in self._traders:
# Setting _market_open to False will stop each "trader" thread
# from doing anything more. So wait for them to finish.
_t.join()
print('\n[{}] .. and that\'s a wrap! Time to head home.\n'.format(_t.getName()))
# Kill the updater too
self._updater_.join()
print('\n\n{} .. wait for me.... I\'m going home too! xD\n'.format(self._updater_.getName()))
# Send mass close instruction to MetaTrader in case anything's left.
self._zmq._DWX_MTX_CLOSE_ALL_TRADES_()
##########################################################################
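# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file; an illustrative
# assumption of how the class above could be driven):
#
#     _strategy = coin_flip_traders(_symbols=[('EURUSD', 0.01)],
#                                   _verbose=False)
#     _strategy._run_()     # one trader thread per symbol + live updater
#     sleep(60)             # let the coin-flip traders work for a while
#     _strategy._stop_()    # join threads and close any remaining trades
# ---------------------------------------------------------------------------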
| bsd-3-clause |
brillliantz/Quantitative_Finance | Initialize_module_part.py | 1 | 6448 | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# import matplotlib.finance as mf
from matplotlib.widgets import MultiCursor
import statsmodels.tsa.stattools as stt
# import scipy.signal as sgn
import statsmodels.api as sm
# from statsmodels.sandbox.regression.predstd import wls_prediction_std
# from matplotlib.mlab import PCA
from collections import defaultdict
#------------------------------------------------
'''Some time length'''
night_len = int(4*3600*2.5)
mor_len = int(4*3600*2.25)
aftn_len = int(4*3600*1.5)
day_len = night_len + mor_len + aftn_len + 4
#-----------------------------------------------
'''add columns'''
def AddCol(df):
vol = df.ix[:, 'volume'].diff()
# this addition is for the convenience of Log y scale plot
# vol +=1
vol = vol.rename('vol_diff')
openint = df.ix[:, 'openInterest'].diff()
# this addition is for the convenience of Log y scale plot
# openint += 1
openint = openint.rename('openInt_diff')
mid = (df.ix[:, 'askPrc_0'] + df.ix[:, 'bidPrc_0']) / 2.
mid = mid.rename('midPrc')
ret = df.join([vol, openint, mid])
return ret
# -------------------------------------------------
def ForwardDiff(df, n=1):
"""Calculate the difference of value after n rows.
Parameters
----------
df : pandas DataFrame
n : int
Returns
-------
ret : DataFrame.
"""
ret = df.diff(periods=n)
ret = ret.shift(periods= -1 * n)
ret = ret.dropna()
return ret
def CutHighVar(df, length=200):
'''
Purpose: Cut a small period after opening in the morning and at night.
    In this time range the price variance is high, which harms our model.
df: pd.DataFrame or pd.Series. With datetime index
length: int. the length you want to cut, counted in ticks. Cannot be larger than 240
'''
ret = df
bool_arr1 = np.logical_or(ret.index.hour == 21, ret.index.hour == 9)
bool_arr = np.logical_and.reduce([bool_arr1,
ret.index.minute == 0,
ret.index.second <= int(length//4) - 1])
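    # bool_arr marks the ticks that fall in the first length/4 seconds after
    # the 21:00 or 09:00 session open (data arrives at roughly 4 ticks/second)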
ret = ret[np.logical_not(bool_arr)]
return ret
def CutTail(df, length=60):
'''
Purpose: Cut a small period before market close.
df: pd.DataFrame or pd.Series. With datetime index
length: int. the length you want to cut, counted in ticks. Cannot be larger than 240
'''
ret = df
last_boolean1 = np.logical_and.reduce(
[ret.index.hour == 14,
ret.index.minute == 59,
ret.index.second >= 60 - int(length//4)])
# this is the last tick
last_boolean2 = ret.index.hour == 15
ret = ret[np.logical_not(np.logical_or(last_boolean1, last_boolean2))]
return ret
def DayChangeNum(ser, distance=7):
'''
    ser is the price move series after processing.
    distance is counted in hours.
'''
h = ser.index.hour
h_diff = np.diff(h)
h_diff = np.insert(h_diff, 1, 0)
ret = np.where(np.abs(h_diff) > distance)[0]
return ret
# def NormPriceMove(ser, daychgnum):
# ret = ser.copy()
# for i in range(len(daychgnum) - 1):
# mysamp = ret.iloc[daychgnum[i]: daychgnum[i+1]]
# #print mysamp
# mystd = mysamp.std()
# print mystd
# ret.iloc[daychgnum[i]: daychgnum[i+1]] /= mystd
# return ret
def CuthlLimit(df, forward=60, backward=100, how='all', depth=0):
"""Cut those reach high low Limit, including an extended length around them
Parameters
----------
df : Original DataFrame including all level quote infomation
forward : forward_ticks of price move
backward : sample length needed to generate an indicator
how : only consider highLimit, lowLimit or allLimit
depth : consider price which level quote reach high low Limit
Returns
-------
ret : selected boolean array
"""
extend_len = 2 * max([forward, backward]) + 1
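    # a tick is discarded when any tick within max(forward, backward) ticks of
    # it had an empty level-`depth` bid or ask queue, i.e. touched the limit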
s1 = 'bidQty_' + str(depth)
s2 = 'askQty_' + str(depth)
if how == 'all':
arr1 = df.ix[:, s1] == 0
arr2 = df[s2] == 0
bool_arr = np.logical_or(arr1, arr2)
#bool_arr = np.logical_or(df[s1] == 0, df[s2] == 0)
elif how == 'bid':
bool_arr = (df[s1] == 0)
elif how == 'ask':
bool_arr = (df[s2] == 0)
else:
        raise ValueError("how must be one of 'all', 'bid' or 'ask'")
float_arr = bool_arr.astype(float)
float_arr_diffusion = pd.Series(data=float_arr).rolling(window=extend_len, center=True).mean()
    discard_arr = float_arr_diffusion.fillna(value=1.).astype(bool)
    return np.logical_not(discard_arr)
def GiveMePM(df, nforward=60, nbackward=100, lim=[0, 30], cutdepth=0, norm=False, high_var_length=200):
"""from original DataFrame calculate price move Series,
including CutTail and CutHighVar.
Parameters
----------
df : the Original DataFrame.
forward : forward_ticks of price move
backward : sample length needed to generate an indicator
n : forward_ticks
lim : can be like (0, 20), counting in days, or an int array of index.
norm : if True, normalize the price move using every day std.
Returns
-------
ret : price move series.
"""
global day_len
if len(lim) == 2:
samp = df.ix[day_len*lim[0]: day_len*lim[1], 'midPrc']
else:
samp = df.ix[lim, 'midPrc']
#print 'samp'
ret = ForwardDiff(samp, nforward)
#print 'ForwardDiff'
# ret = CuthlLimit(ret, how='all', depth=cutdepth).loc[:, 'midPrc']
# #print 'CuthlLimit'
ret = CutTail(ret, nforward)
#print 'CutTail'
cut_head_length = max([high_var_length, nbackward])
ret = CutHighVar(ret, length=cut_head_length)
#print 'CutHighVar'
# if norm:
# ret_daychangenum = DayChangeNum(ret)
# ret = NormPriceMove(ret, ret_daychangenum)
selected_arr = CuthlLimit(df, forward=nforward, backward=nbackward, how='all', depth=cutdepth)
return ret[selected_arr].dropna()
def GiveMeIndex(arri, arro):
'''
Generate integer index
arr is a two dim ndarray, with each element being a time range(counting in days).
'''
global day_len
index_in = list()
for k in arri:
index_in = index_in + list(range(day_len * k[0], day_len * k[1]))
index_out = list()
for k in arro:
index_out = index_out + list(range(day_len * k[0], day_len * k[1]))
return index_in, index_out
| cc0-1.0 |
marioem/data-science-from-scratch | code/visualizing_data.py | 58 | 5116 | import matplotlib.pyplot as plt
from collections import Counter
def make_chart_simple_line_chart(plt):
years = [1950, 1960, 1970, 1980, 1990, 2000, 2010]
gdp = [300.2, 543.3, 1075.9, 2862.5, 5979.6, 10289.7, 14958.3]
# create a line chart, years on x-axis, gdp on y-axis
plt.plot(years, gdp, color='green', marker='o', linestyle='solid')
# add a title
plt.title("Nominal GDP")
# add a label to the y-axis
plt.ylabel("Billions of $")
plt.show()
def make_chart_simple_bar_chart(plt):
movies = ["Annie Hall", "Ben-Hur", "Casablanca", "Gandhi", "West Side Story"]
num_oscars = [5, 11, 3, 8, 10]
# bars are by default width 0.8, so we'll add 0.1 to the left coordinates
# so that each bar is centered
xs = [i + 0.1 for i, _ in enumerate(movies)]
# plot bars with left x-coordinates [xs], heights [num_oscars]
plt.bar(xs, num_oscars)
plt.ylabel("# of Academy Awards")
plt.title("My Favorite Movies")
# label x-axis with movie names at bar centers
plt.xticks([i + 0.5 for i, _ in enumerate(movies)], movies)
plt.show()
def make_chart_histogram(plt):
grades = [83,95,91,87,70,0,85,82,100,67,73,77,0]
decile = lambda grade: grade // 10 * 10
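    # e.g. decile(83) -> 80, decile(95) -> 90, decile(100) -> 100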
histogram = Counter(decile(grade) for grade in grades)
plt.bar([x - 4 for x in histogram.keys()], # shift each bar to the left by 4
histogram.values(), # give each bar its correct height
8) # give each bar a width of 8
plt.axis([-5, 105, 0, 5]) # x-axis from -5 to 105,
# y-axis from 0 to 5
plt.xticks([10 * i for i in range(11)]) # x-axis labels at 0, 10, ..., 100
plt.xlabel("Decile")
plt.ylabel("# of Students")
plt.title("Distribution of Exam 1 Grades")
plt.show()
def make_chart_misleading_y_axis(plt, mislead=True):
mentions = [500, 505]
years = [2013, 2014]
plt.bar([2012.6, 2013.6], mentions, 0.8)
plt.xticks(years)
plt.ylabel("# of times I heard someone say 'data science'")
# if you don't do this, matplotlib will label the x-axis 0, 1
# and then add a +2.013e3 off in the corner (bad matplotlib!)
plt.ticklabel_format(useOffset=False)
if mislead:
# misleading y-axis only shows the part above 500
plt.axis([2012.5,2014.5,499,506])
plt.title("Look at the 'Huge' Increase!")
else:
plt.axis([2012.5,2014.5,0,550])
plt.title("Not So Huge Anymore.")
plt.show()
def make_chart_several_line_charts(plt):
variance = [1,2,4,8,16,32,64,128,256]
bias_squared = [256,128,64,32,16,8,4,2,1]
total_error = [x + y for x, y in zip(variance, bias_squared)]
xs = range(len(variance))
# we can make multiple calls to plt.plot
# to show multiple series on the same chart
plt.plot(xs, variance, 'g-', label='variance') # green solid line
plt.plot(xs, bias_squared, 'r-.', label='bias^2') # red dot-dashed line
plt.plot(xs, total_error, 'b:', label='total error') # blue dotted line
# because we've assigned labels to each series
# we can get a legend for free
# loc=9 means "top center"
plt.legend(loc=9)
plt.xlabel("model complexity")
plt.title("The Bias-Variance Tradeoff")
plt.show()
def make_chart_scatter_plot(plt):
friends = [ 70, 65, 72, 63, 71, 64, 60, 64, 67]
minutes = [175, 170, 205, 120, 220, 130, 105, 145, 190]
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']
plt.scatter(friends, minutes)
# label each point
for label, friend_count, minute_count in zip(labels, friends, minutes):
plt.annotate(label,
xy=(friend_count, minute_count), # put the label with its point
xytext=(5, -5), # but slightly offset
textcoords='offset points')
plt.title("Daily Minutes vs. Number of Friends")
plt.xlabel("# of friends")
plt.ylabel("daily minutes spent on the site")
plt.show()
def make_chart_scatterplot_axes(plt, equal_axes=False):
test_1_grades = [ 99, 90, 85, 97, 80]
test_2_grades = [100, 85, 60, 90, 70]
plt.scatter(test_1_grades, test_2_grades)
plt.xlabel("test 1 grade")
plt.ylabel("test 2 grade")
if equal_axes:
plt.title("Axes Are Comparable")
plt.axis("equal")
else:
plt.title("Axes Aren't Comparable")
plt.show()
def make_chart_pie_chart(plt):
plt.pie([0.95, 0.05], labels=["Uses pie charts", "Knows better"])
# make sure pie is a circle and not an oval
plt.axis("equal")
plt.show()
if __name__ == "__main__":
make_chart_simple_line_chart(plt)
make_chart_simple_bar_chart(plt)
make_chart_histogram(plt)
make_chart_misleading_y_axis(plt, mislead=True)
make_chart_misleading_y_axis(plt, mislead=False)
make_chart_several_line_charts(plt)
make_chart_scatterplot_axes(plt, equal_axes=False)
make_chart_scatterplot_axes(plt, equal_axes=True)
make_chart_pie_chart(plt)
| unlicense |
kaloix/home-sensor | server.py | 2 | 21927 | #!/usr/bin/env python3
import collections
import configparser
import contextlib
import csv
import datetime
import itertools
import json
import locale
import logging
import queue
import shutil
import time
import dateutil.rrule
import matplotlib.dates
import matplotlib.pyplot
import pysolar
import pytz
import api
import notify
import utility
ALLOWED_DOWNTIME = datetime.timedelta(minutes=30)
COLOR_CYCLE = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
DATA_DIR = 'data/'
INTERVAL = 60
PAUSE_WARN_FAILURE = 30 * 24 * 60 * 60
PAUSE_WARN_VALUE = 24 * 60 * 60
PLOT_INTERVAL = 10 * 60
RECORD_DAYS = 7
SUMMARY_DAYS = 183
TIMEZONE = pytz.timezone('Europe/Berlin')
WEB_DIR = '/home/kaloix/html/sensor/'
config = configparser.ConfigParser()
groups = collections.defaultdict(collections.OrderedDict)
inbox = queue.Queue()
now = datetime.datetime.now(tz=datetime.timezone.utc)
Record = collections.namedtuple('Record', 'timestamp value')
Summary = collections.namedtuple('Summary', 'date minimum maximum')
Uptime = collections.namedtuple('Uptime', 'date value')
def main():
global now
utility.logging_config()
locale.setlocale(locale.LC_ALL, 'de_DE.UTF-8')
config.read('config.ini')
with open('sensor.json') as json_file:
sensor_json = json_file.read()
devices = json.loads(sensor_json,
object_pairs_hook=collections.OrderedDict)
for device in devices:
for kind, attr in device['output'].items():
if kind == 'temperature':
groups[attr['group']][attr['name']] = Temperature(
attr['low'],
attr['high'],
attr['name'],
device['input']['interval'],
attr['fail-notify'])
elif kind == 'switch':
groups[attr['group']][attr['name']] = Switch(
attr['name'],
device['input']['interval'],
attr['fail-notify'])
with website(), api.ApiServer(accept_record), \
notify.MailSender(
config['email']['source_address'],
config['email']['admin_address'],
config['email']['user_address'],
config['email'].getboolean('enable_email')) as mail:
while True:
# get new record
start = time.perf_counter()
now = datetime.datetime.now(tz=datetime.timezone.utc)
record_counter = int()
with contextlib.suppress(queue.Empty):
while True:
group, name, record = inbox.get(block=False)
groups[group][name].save(record)
record_counter += 1
# update content
for group, series_dict in groups.items():
for series in series_dict.values():
if series.error:
mail.queue(series.error, PAUSE_WARN_FAILURE)
if series.warning:
mail.queue(series.warning, PAUSE_WARN_VALUE)
detail_html(group, series_dict.values())
with contextlib.suppress(utility.CallDenied):
make_plots()
mail.send_all()
# log processing
utility.memory_check()
logging.info('updated website in {:.3f}s, {} new records'.format(
time.perf_counter() - start, record_counter))
time.sleep(INTERVAL)
@contextlib.contextmanager
def website():
shutil.copy('static/favicon.png', WEB_DIR)
shutil.copy('static/htaccess', WEB_DIR + '.htaccess')
shutil.copy('static/index.html', WEB_DIR)
try:
yield
finally:
logging.info('disable website')
shutil.copy('static/htaccess_maintenance', WEB_DIR + '.htaccess')
def accept_record(group, name, timestamp, value):
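	# API callback: append the record to this year's CSV file and hand it to
	# the main loop through the inbox queue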
timestamp = datetime.datetime.fromtimestamp(int(timestamp),
tz=datetime.timezone.utc)
logging.info('{}: {} / {}'.format(name, timestamp, value))
filename = '{}/{}_{}.csv'.format(DATA_DIR, name,
timestamp.astimezone(TIMEZONE).year)
with open(filename, mode='a', newline='') as csv_file:
writer = csv.writer(csv_file)
writer.writerow((int(timestamp.timestamp()), value))
inbox.put((group, name, Record(timestamp, value)))
def detail_html(group, series_list):
text = list()
text.append('<ul>')
for series in series_list:
text.append('<li>{}</li>'.format(series))
text.append('</ul>')
values = '\n'.join(text)
filename = '{}{}.html'.format(WEB_DIR, group)
with open(filename, mode='w') as html_file:
html_file.write(values)
@utility.allow_every_x_seconds(PLOT_INTERVAL)
def make_plots():
for group, series_dict in groups.items():
# FIXME svg backend has memory leak in matplotlib 1.4.3
plot_history(series_dict.values(), '{}{}.png'.format(WEB_DIR, group))
def _nighttime(count, date_time):
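	# yield (sunset, next-day sunrise) pairs, i.e. the night spans, for the
	# last `count` days at the hard-coded location (49.2 N, 11.08 E)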
date_time -= datetime.timedelta(days=count)
sun_change = list()
for c in range(0, count + 1):
date_time += datetime.timedelta(days=1)
sun_change.extend(pysolar.util.get_sunrise_sunset(
49.2, 11.08, date_time))
sun_change = sun_change[1:-1]
for r in range(0, count):
yield sun_change[2 * r], sun_change[2 * r + 1]
def _plot_records(series_list, days):
color_iter = iter(COLOR_CYCLE)
for series in series_list:
color = next(color_iter)
if type(series) is Temperature:
parts = list()
for record in series.day if days == 1 else series.records:
if (not parts or record.timestamp - parts[-1][-1].timestamp >
ALLOWED_DOWNTIME):
parts.append(list())
parts[-1].append(record)
for part in parts:
timestamps, values = zip(*part)
matplotlib.pyplot.plot(timestamps, values, label=series.name,
linewidth=2, color=color, zorder=3)
elif type(series) is Switch:
for start, end in series.segments(series.records):
matplotlib.pyplot.axvspan(start, end, label=series.name,
color=color, alpha=0.5, zorder=1)
for sunset, sunrise in _nighttime(days + 1, now):
matplotlib.pyplot.axvspan(sunset, sunrise, label='Nacht', hatch='//',
facecolor='0.9', edgecolor='0.8', zorder=0)
matplotlib.pyplot.xlim(now - datetime.timedelta(days), now)
matplotlib.pyplot.ylabel('Temperatur °C')
ax = matplotlib.pyplot.gca() # FIXME not available in mplrc 1.4.3
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
def _plot_summary(series_list):
ax1 = matplotlib.pyplot.gca() # FIXME not available in mplrc 1.4.3
ax2 = ax1.twinx()
color_iter = iter(COLOR_CYCLE)
switch = False
for series in series_list:
color = next(color_iter)
if type(series) is Temperature:
parts = list()
for summary in series.summary:
if (not parts or summary.date - parts[-1][-1].date >
datetime.timedelta(days=7)):
parts.append(list())
parts[-1].append(summary)
for part in parts:
dates, mins, maxs = zip(*part)
ax1.fill_between(dates, mins, maxs, label=series.name,
color=color, alpha=0.5, interpolate=True,
zorder=0)
elif type(series) is Switch:
switch = True
dates, values = zip(*series.summary)
ax2.plot(dates, values, color=color,
marker='o', linestyle='', zorder=1)
today = now.astimezone(TIMEZONE).date()
matplotlib.pyplot.xlim(today - datetime.timedelta(days=SUMMARY_DAYS),
today)
ax1.set_ylabel('Temperatur °C')
ax1.yaxis.tick_right()
ax1.yaxis.set_label_position('right')
if switch:
ax2.set_ylabel('Laufzeit h')
ax2.yaxis.tick_left()
ax2.yaxis.set_label_position('left')
ax2.grid(False)
else:
ax2.set_visible(False)
def plot_history(series_list, file):
fig = matplotlib.pyplot.figure(figsize=(12, 7))
# last week
ax = matplotlib.pyplot.subplot(312)
_plot_records(series_list, RECORD_DAYS)
frame_start = now - datetime.timedelta(days=RECORD_DAYS)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%a.'))
ax.xaxis.set_ticks(_day_locator(frame_start, now, TIMEZONE))
ax.xaxis.set_ticks(_hour_locator(frame_start, now, 6, TIMEZONE),
minor=True)
handles, labels = ax.get_legend_handles_labels()
# last day
ax = matplotlib.pyplot.subplot(311)
_plot_records(series_list, 1)
matplotlib.pyplot.legend(
handles=list(collections.OrderedDict(zip(labels, handles)).values()),
loc='lower left', bbox_to_anchor=(0, 1), ncol=5, frameon=False)
frame_start = now - datetime.timedelta(days=1)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H'))
ax.xaxis.set_ticks(_hour_locator(frame_start, now, 2, TIMEZONE))
ax.xaxis.set_minor_locator(matplotlib.dates.HourLocator())
# summary
ax = matplotlib.pyplot.subplot(313)
_plot_summary(series_list)
frame_start = now - datetime.timedelta(days=SUMMARY_DAYS)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%b.'))
ax.xaxis.set_ticks(_month_locator(frame_start, now, TIMEZONE))
ax.xaxis.set_ticks(_week_locator(frame_start, now, TIMEZONE), minor=True)
# save file
matplotlib.pyplot.savefig(file, bbox_inches='tight')
matplotlib.pyplot.close()
# matplotlib.dates.RRuleLocator is bugged at dst transitions
# http://matplotlib.org/api/dates_api.html#matplotlib.dates.RRuleLocator
# https://github.com/matplotlib/matplotlib/issues/2737/
# https://github.com/dateutil/dateutil/issues/102
def _month_locator(start, end, tz):
lower = start.astimezone(tz).date().replace(day=1)
upper = end.astimezone(tz).date()
rule = dateutil.rrule.rrule(dateutil.rrule.MONTHLY,
dtstart=lower, until=upper)
return [tz.localize(dt) for dt in rule if start <= tz.localize(dt) <= end]
def _week_locator(start, end, tz):
lower = start.astimezone(tz).date()
upper = end.astimezone(tz).date()
rule = dateutil.rrule.rrule(dateutil.rrule.WEEKLY,
byweekday=dateutil.rrule.MO,
dtstart=lower, until=upper)
return [tz.localize(dt) for dt in rule if start <= tz.localize(dt) <= end]
def _day_locator(start, end, tz):
lower = start.astimezone(tz).date()
upper = end.astimezone(tz).date()
rule = dateutil.rrule.rrule(dateutil.rrule.DAILY,
dtstart=lower, until=upper)
return [tz.localize(dt) for dt in rule if start <= tz.localize(dt) <= end]
def _hour_locator(start, end, step, tz):
lower = start.astimezone(tz).date()
upper = end.astimezone(tz).replace(tzinfo=None)
rule = dateutil.rrule.rrule(dateutil.rrule.HOURLY,
byhour=range(0, 24, step),
dtstart=lower, until=upper)
return [tz.localize(dt) for dt in rule if start <= tz.localize(dt) <= end]
def _universal_parser(value):
if value == 'False':
return False
elif value == 'True':
return True
else:
return float(value)
def _format_timedelta(td):
ret = list()
hours = td.days * 24 + td.seconds // 3600
if hours:
ret.append(str(hours))
ret.append('Stunde' if hours == 1 else 'Stunden')
minutes = (td.seconds // 60) % 60
ret.append(str(minutes))
ret.append('Minute' if minutes == 1 else 'Minuten')
return ' '.join(ret)
def _format_timestamp(ts):
ts = ts.astimezone(TIMEZONE)
local_now = now.astimezone(TIMEZONE)
if ts.date() == local_now.date():
return 'um {:%H:%M} Uhr'.format(ts)
if local_now.date() - ts.date() == datetime.timedelta(days=1):
return 'gestern um {:%H:%M} Uhr'.format(ts)
if local_now.date() - ts.date() < datetime.timedelta(days=7):
return 'am {:%A um %H:%M} Uhr'.format(ts)
if ts.year == local_now.year:
return 'am {:%d. %B um %H:%M} Uhr'.format(ts)
return 'am {:%d. %B %Y um %H:%M} Uhr'.format(ts)
def _format_temperature(record, low, high):
if not record:
return 'Keine Daten empfangen'
text = '{:.1f} °C {}'.format(record.value,
_format_timestamp(record.timestamp))
if low <= record.value <= high:
return text
return '<mark>{}</mark>'.format(text)
def _format_switch(record):
if not record:
return 'Keine Daten empfangen'
return '{} {}'.format('Ein' if record.value else 'Aus',
_format_timestamp(record.timestamp))
class Series(object):
text = None
def __init__(self, name, interval, fail_notify):
self.name = name
self.interval = datetime.timedelta(seconds=interval)
self.notify = fail_notify
self.fail_status = False
self.fail_counter = int()
self.records = collections.deque()
self.summary = collections.deque()
self._read(now.year - 1)
self._read(now.year)
self._clear()
def __str__(self):
ret = list()
first, *lines = self.text
lines.append('Aktualisierung alle {}'.format(
_format_timedelta(self.interval)))
ret.append('<strong>{}</strong>'.format(first))
ret.append('<ul>')
for line in lines:
ret.append('<li>{}</li>'.format(line))
ret.append('</ul>')
return '\n'.join(ret)
def _append(self, record):
if self.records and record.timestamp <= self.records[-1].timestamp:
raise OlderThanPreviousError('{}: previous {}, new {}'.format(
self.name, self.records[-1].timestamp.timestamp(),
record.timestamp.timestamp()))
self.records.append(record)
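		# collapse runs of identical values: if the last three records match
		# and fall within the allowed downtime, drop the middle one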
if (len(self.records) >= 3 and self.records[-3].value ==
self.records[-2].value == self.records[-1].value and
self.records[-1].timestamp - self.records[-3].timestamp <
ALLOWED_DOWNTIME):
del self.records[-2]
def _clear(self):
while (self.records and self.records[0].timestamp < now -
datetime.timedelta(RECORD_DAYS)):
self.records.popleft()
while (self.summary and self.summary[0].date < (now -
datetime.timedelta(SUMMARY_DAYS)).astimezone(TIMEZONE).date()):
self.summary.popleft()
def _read(self, year):
filename = '{}/{}_{}.csv'.format(DATA_DIR, self.name, year)
try:
with open(filename, newline='') as csv_file:
for row in csv.reader(csv_file):
timestamp = datetime.datetime.fromtimestamp(
int(row[0]), tz=datetime.timezone.utc)
value = _universal_parser(row[1])
record = Record(timestamp, value)
try:
self._append(record)
except OlderThanPreviousError:
# FIXME: remove this except, instead don't save invalid data
continue
self._summarize(record)
except OSError:
pass
@property
def current(self):
if (self.records and now - self.records[-1].timestamp <=
ALLOWED_DOWNTIME):
return self.records[-1]
else:
return None
@property
def error(self):
if not self.notify:
return None
if self.current:
self.fail_status = False
return None
if not self.fail_status:
self.fail_status = True
self.fail_counter += 1
return 'Messpunkt "{}" liefert keine Daten. (#{})'.format(
self.name, self.fail_counter)
@property
def day(self):
min_time = now - datetime.timedelta(days=1)
start = len(self.records)
while start > 0 and self.records[start - 1].timestamp >= min_time:
start -= 1
return itertools.islice(self.records, start, None)
def save(self, record):
try:
self._append(record)
except OlderThanPreviousError as err:
logging.warning('ignore {}'.format(err))
return
self._summarize(record)
self._clear()
class Temperature(Series):
def __init__(self, low, high, *args):
self.low = low
self.high = high
self.date = datetime.date.min
self.today = None
super().__init__(*args)
@classmethod
def minmax(cls, records):
minimum = maximum = None
for record in records:
if not minimum or record.value <= minimum.value:
minimum = record
if not maximum or record.value >= maximum.value:
maximum = record
return minimum, maximum
def _summarize(self, record):
date = record.timestamp.astimezone(TIMEZONE).date()
if date > self.date:
if self.today:
self.summary.append(Summary(self.date,
min(self.today), max(self.today)))
self.date = date
self.today = list()
self.today.append(record.value)
@property
def text(self):
minimum, maximum = self.minmax(self.records)
minimum_d, maximum_d = self.minmax(self.day)
yield '{}: {}'.format(
self.name, _format_temperature(self.current, self.low, self.high))
if minimum_d:
yield 'Letzte 24 Stunden: ▼ {} / ▲ {}'.format(
_format_temperature(minimum_d, self.low, self.high),
_format_temperature(maximum_d, self.low, self.high))
if minimum:
yield 'Letzte 7 Tage: ▼ {} / ▲ {}'.format(
_format_temperature(minimum, self.low, self.high),
_format_temperature(maximum, self.low, self.high))
yield 'Warnbereich unter {:.0f} °C und über {:.0f} °C'.format(
self.low, self.high)
@property
def warning(self):
current = self.current
if not current:
return None
if current.value < self.low:
return 'Messpunkt "{}" unter {} °C.'.format(self.name, self.low)
if current.value > self.high:
return 'Messpunkt "{}" über {} °C.'.format(self.name, self.high)
return None
class Switch(Series):
def __init__(self, *args):
self.date = None
super().__init__(*args)
@classmethod
def uptime(cls, segments):
total = datetime.timedelta()
for start, stop in segments:
total += stop - start
return total
@classmethod
def segments(cls, records):
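		# yield (start, end) spans during which the switch was on; a data gap
		# longer than ALLOWED_DOWNTIME closes the current span early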
expect = True
for timestamp, value in records:
# assume false during downtime
if not expect and timestamp - running > ALLOWED_DOWNTIME:
expect = True
yield start, running
if value:
running = timestamp
# identify segments
if expect != value:
continue
if expect:
expect = False
start = timestamp
else:
expect = True
yield start, timestamp
if not expect:
yield start, running
def _summarize(self, record): # TODO record.value not used
date = record.timestamp.astimezone(TIMEZONE).date()
if not self.date:
self.date = date
return
if date <= self.date:
return
lower = datetime.datetime.combine(self.date, datetime.time.min)
lower = TIMEZONE.localize(lower)
upper = datetime.datetime.combine(
self.date + datetime.timedelta(days=1),
datetime.time.min)
upper = TIMEZONE.localize(upper)
total = datetime.timedelta()
for start, end in self.segments(self.records):
if end <= lower or start >= upper:
continue
if start < lower:
start = lower
if end > upper:
end = upper
total += end - start
hours = total / datetime.timedelta(hours=1)
self.summary.append(Uptime(self.date, hours))
self.date = date
@property
def text(self):
last_false = last_true = None
for record in reversed(self.records):
if record.value:
if not last_true:
last_true = record
elif not last_false:
last_false = record
if last_false and last_true:
break
current = self.current
yield '{}: {}'.format(self.name, _format_switch(current))
if last_true and (not current or not current.value):
yield 'Zuletzt {}'.format(_format_switch(last_true))
if last_false and (not current or current.value):
yield 'Zuletzt {}'.format(_format_switch(last_false))
yield 'Letzte 24 Stunden: Einschaltdauer {}'.format(
_format_timedelta(self.uptime(self.segments(self.day))))
yield 'Letzte 7 Tage: Einschaltdauer {}'.format(
_format_timedelta(self.uptime(self.segments(self.records))))
@property
def warning(self):
return None
class OlderThanPreviousError(Exception):
pass
if __name__ == "__main__":
main()
| gpl-3.0 |
henrykironde/scikit-learn | sklearn/tests/test_grid_search.py | 83 | 28713 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
sameera2004/chxtools | chxtools/plot.py | 3 | 5392 | import numpy as np
import subprocess
from dataportal import DataBroker, DataMuxer
from dataportal.broker import EventQueue
import matplotlib.pyplot as plt
import time as ttime
import sys
from ophyd.userapi.scan_api import estimate
def new_queue(header, queue=None):
if queue is None:
queue = EventQueue(header)
return header, queue
hdr = DataBroker[-1]
if header.scan_id != hdr.scan_id:
print("New header found: Scan id = %s. uid = %s" %
(hdr.scan_id, hdr.run_start_uid))
sys.stdout.flush()
queue = EventQueue(hdr)
return hdr, queue
return header, queue
vlines = {'center_of_mass': {'color': 'red'},
'cen': {'color': 'red', 'ls': '--'},}
hlines = {'avgy': {'color': 'blue', 'ls': '-'},
'ymin': {'color': 'black', 'ls': '--'},
'ymax': {'color': 'black', 'ls': '--'}, }
points = {'cen': {'color': 'red', 'marker': 'o'},
          'fwhm_left': {'color': 'red', 'marker': '<'},
'fwhm_right': {'color': 'red', 'marker': '>'}}
def plot1d(y, x=None, scans=None, live=True, sleep_time=1):
"""Plot live data and on-the-fly peak stats estimator
Parameters
----------
y : str
The name of the y value to plot
x : str, optional
The name of the value to plot on the x axis. If None, defaults
to the sequence number of the event (Note that this probably works,
but I'm not sure as it has not been tested!)
scans : list, optional
List of other scan indices to plot. uses db[] syntax, so any valid
entry to [] will work
live : bool, optional
Grab new data and plot it as it comes off. Defaults to True.
sleep_time : float, optional
Time to sleep between data updates. Defaults to 1 sec
"""
if scans is None:
scans = []
lines1 = {}
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(15,10), sharex=True)
fig.show()
for scan_id in scans:
hdr = DataBroker[scan_id]
events = DataBroker.fetch_events(hdr)
dm = DataMuxer.from_events(events)
df = dm.to_sparse_dataframe()
if x is None:
old_x = np.asarray(df.index)
else:
old_x = np.asarray(df[x])
old_y = np.asarray(df[y])
lines1[scan_id], = ax1.plot(old_x, old_y, 'o', ms=15, label=scan_id)
if x is None:
ax1.set_xlabel('scan point index')
ax2.set_xlabel('scan point index')
else:
ax1.set_xlabel(x)
ax2.set_xlabel(x)
ax1.set_ylabel(y)
ax2.set_ylabel(y)
ax1.set_title('data stream')
ax2.set_title('peak estimator')
if live:
hdr = DataBroker[-1]
scan_id = hdr.scan_id
while scan_id in lines1:
ttime.sleep(.5)
hdr = DataBroker[-1]
scan_id = hdr.scan_id
lines1[scan_id], = ax1.plot([], [], 'o', ms=15, label=scan_id)
queue = None
prev_stats = None
while True:
# loop until killed
hdr, queue = new_queue(hdr, queue)
scan_id = hdr.scan_id
queue.update()
new_events = queue.get()
try:
old_x, old_y = lines1[scan_id].get_data()
old_x = list(old_x)
old_y = list(old_y)
except KeyError:
lines1[scan_id], = ax1.plot([], [], 'o', ms=15, label=scan_id)
old_x, old_y = [], []
if x is None:
                new_x = [ev.seq_num for ev in new_events]
else:
new_x = [ev['data'][x] for ev in new_events]
new_y = [ev['data'][y] for ev in new_events]
new_x = old_x + new_x
new_y = old_y + new_y
lines1[scan_id].set_data(new_x, new_y)
ax1.relim(visible_only=True)
ax1.legend(loc=0).draggable()
# now deal with axis 2
try:
stats = estimate(np.asarray(new_x), np.asarray(new_y))
except ValueError:
stats = prev_stats
# print(stats)
if stats != prev_stats:
ax2.cla()
ax2.plot(new_x, new_y, 'o', ms=15, label=scan_id)
ax2.set_title('peak estimator')
for stat, vals in stats.items():
if stat in points:
# sometimes 'cen' comes back as one or two values. This
# try/except block is a way to do the right thing when
# this happens
try:
vals[0]
ax2.scatter(vals[0], vals[1], label=stat, **points[stat])
except IndexError:
ax2.axvline(vals, label=stat, **vlines[stat])
elif stat in hlines:
# draw a horizontal line
ax2.axhline(vals, label=stat, **hlines[stat])
elif stat in vlines:
# draw a vertical line
ax2.axvline(vals, label=stat, **vlines[stat])
prev_stats = stats
ax2.relim(visible_only=True)
ax2.legend(loc=0).draggable()
fig.canvas.draw()
fig.canvas.flush_events()
ttime.sleep(sleep_time)
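# --- hedged usage sketch (not part of the original module) ---
# Minimal illustration of how plot1d() might be called, assuming a reachable
# DataBroker/metadatastore service and that 'det_intensity' and 'motor_pos'
# are hypothetical field names recorded in the scans being plotted.
# scans=[-1, -2] uses the db[] negative-index syntax mentioned in the
# docstring to overlay the two most recent scans; live=False skips the
# polling loop and only replots the historical data.
if __name__ == "__main__":
    plot1d("det_intensity", x="motor_pos", scans=[-1, -2],
           live=False, sleep_time=1)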
| bsd-3-clause |
lanselin/pysal | pysal/esda/tests/test_moran.py | 5 | 7346 | import unittest
import pysal
from .. import moran
from ...common import pandas, RTOL, ATOL
import numpy as np
PANDAS_EXTINCT = pandas is None
class Moran_Tester(unittest.TestCase):
def setUp(self):
self.w = pysal.open(pysal.examples.get_path("stl.gal")).read()
f = pysal.open(pysal.examples.get_path("stl_hom.txt"))
self.y = np.array(f.by_col['HR8893'])
def test_moran(self):
mi = moran.Moran(self.y, self.w, two_tailed=False)
np.testing.assert_allclose(mi.I, 0.24365582621771659, rtol=RTOL, atol=ATOL)
self.assertAlmostEquals(mi.p_norm, 0.00013573931385468807)
def test_sids(self):
w = pysal.open(pysal.examples.get_path("sids2.gal")).read()
f = pysal.open(pysal.examples.get_path("sids2.dbf"))
SIDR = np.array(f.by_col("SIDR74"))
mi = pysal.Moran(SIDR, w, two_tailed=False)
np.testing.assert_allclose(mi.I, 0.24772519320480135, atol=ATOL, rtol=RTOL)
self.assertAlmostEquals(mi.p_norm, 5.7916539074498452e-05)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pysal.contrib.pdio as pdio
df = pdio.read_files(pysal.examples.get_path('sids2.dbf'))
w = pysal.open(pysal.examples.get_path("sids2.gal")).read()
mi = moran.Moran.by_col(df, ['SIDR74'], w=w, two_tailed=False)
sidr = np.unique(mi.SIDR74_moran.values)
pval = np.unique(mi.SIDR74_p_sim.values)
np.testing.assert_allclose(sidr, 0.24772519320480135, atol=ATOL, rtol=RTOL)
self.assertAlmostEquals(pval, 0.001)
class Moran_Rate_Tester(unittest.TestCase):
def setUp(self):
self.w = pysal.open(pysal.examples.get_path("sids2.gal")).read()
f = pysal.open(pysal.examples.get_path("sids2.dbf"))
self.e = np.array(f.by_col['SID79'])
self.b = np.array(f.by_col['BIR79'])
def test_moran_rate(self):
mi = moran.Moran_Rate(self.e, self.b, self.w, two_tailed=False)
np.testing.assert_allclose(mi.I, 0.16622343552567395, rtol=RTOL, atol=ATOL)
self.assertAlmostEquals(mi.p_norm, 0.004191499504892171)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pysal.contrib.pdio as pdio
df = pdio.read_files(pysal.examples.get_path('sids2.dbf'))
mi = moran.Moran_Rate.by_col(df, ['SID79'], ['BIR79'], w=self.w, two_tailed=False)
sidr = np.unique(mi["SID79-BIR79_moran_rate"].values)
pval = np.unique(mi["SID79-BIR79_p_sim"].values)
np.testing.assert_allclose(sidr, 0.16622343552567395, rtol=RTOL, atol=ATOL)
self.assertAlmostEquals(pval, 0.009)
class Moran_BV_matrix_Tester(unittest.TestCase):
def setUp(self):
f = pysal.open(pysal.examples.get_path("sids2.dbf"))
varnames = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']
self.names = varnames
vars = [np.array(f.by_col[var]) for var in varnames]
self.vars = vars
self.w = pysal.open(pysal.examples.get_path("sids2.gal")).read()
def test_Moran_BV_matrix(self):
res = moran.Moran_BV_matrix(self.vars, self.w, varnames=self.names)
self.assertAlmostEquals(res[(0, 1)].I, 0.19362610652874668)
self.assertAlmostEquals(res[(3, 0)].I, 0.37701382542927858)
class Moran_Local_Tester(unittest.TestCase):
def setUp(self):
np.random.seed(10)
self.w = pysal.open(pysal.examples.get_path("desmith.gal")).read()
f = pysal.open(pysal.examples.get_path("desmith.txt"))
self.y = np.array(f.by_col['z'])
def test_Moran_Local(self):
lm = moran.Moran_Local(
self.y, self.w, transformation="r", permutations=99)
self.assertAlmostEquals(lm.z_sim[0], -0.68493799168603808)
self.assertAlmostEquals(lm.p_z_sim[0], 0.24669152541631179)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pandas as pd
df = pd.DataFrame(self.y, columns =['z'])
lm = moran.Moran_Local.by_col(df, ['z'], w=self.w, transformation='r',
permutations=99, outvals=['z_sim', 'p_z_sim'])
self.assertAlmostEquals(lm.z_z_sim[0], -0.68493799168603808)
self.assertAlmostEquals(lm.z_p_z_sim[0], 0.24669152541631179)
class Moran_Local_BV_Tester(unittest.TestCase):
def setUp(self):
np.random.seed(10)
self.w = pysal.open(pysal.examples.get_path("sids2.gal")).read()
f = pysal.open(pysal.examples.get_path("sids2.dbf"))
self.x = np.array(f.by_col['SIDR79'])
self.y = np.array(f.by_col['SIDR74'])
def test_Moran_Local_BV(self):
lm = moran.Moran_Local_BV(self.x, self.y, self.w,
transformation="r", permutations=99)
self.assertAlmostEquals(lm.Is[0], 1.4649221250620736)
self.assertAlmostEquals(lm.z_sim[0], 1.5816540860500772)
self.assertAlmostEquals(lm.p_z_sim[0], 0.056864279811026153)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pysal.contrib.pdio as pdio
df = pdio.read_files(pysal.examples.get_path('sids2.dbf'))
np.random.seed(12345)
moran.Moran_Local_BV.by_col(df, ['SIDR74', 'SIDR79'], w=self.w,
inplace=True, outvals=['z_sim', 'p_z_sim'],
transformation='r', permutations=99)
bvstats = df['SIDR79-SIDR74_moran_local_bv'].values
bvz = df['SIDR79-SIDR74_z_sim'].values
bvzp = df['SIDR79-SIDR74_p_z_sim'].values
self.assertAlmostEquals(bvstats[0], 1.4649221250620736)
self.assertAlmostEquals(bvz[0], 1.657427, 5)
self.assertAlmostEquals(bvzp[0], 0.048717, 5)
class Moran_Local_Rate_Tester(unittest.TestCase):
def setUp(self):
np.random.seed(10)
self.w = pysal.open(pysal.examples.get_path("sids2.gal")).read()
f = pysal.open(pysal.examples.get_path("sids2.dbf"))
self.e = np.array(f.by_col['SID79'])
self.b = np.array(f.by_col['BIR79'])
def test_moran_rate(self):
lm = moran.Moran_Local_Rate(self.e, self.b, self.w,
transformation="r", permutations=99)
self.assertAlmostEquals(lm.z_sim[0], -0.13699844503985936, 7)
self.assertAlmostEquals(lm.p_z_sim[0], 0.44551601210081715)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pysal.contrib.pdio as pdio
df = pdio.read_files(pysal.examples.get_path('sids2.dbf'))
lm = moran.Moran_Local_Rate.by_col(df, ['SID79'], ['BIR79'], w=self.w,
outvals=['p_z_sim', 'z_sim'],
transformation='r', permutations=99)
self.assertAlmostEquals(lm['SID79-BIR79_z_sim'][0], -0.13699844503985936, 7)
self.assertAlmostEquals(lm['SID79-BIR79_p_z_sim'][0], 0.44551601210081715)
suite = unittest.TestSuite()
test_classes = [Moran_Tester, Moran_Rate_Tester,
Moran_BV_matrix_Tester, Moran_Local_Tester,
Moran_Local_BV_Tester, Moran_Local_Rate_Tester]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| bsd-3-clause |
coupdair/pyoptools | pyoptools/gui/glwindow.py | 9 | 19554 | #!/usr/bin/env python
# This includes the two classes wxGLWindow and wxAdvancedGLWindow
# from OpenGL.TK in the PyOpenGL distribution
# ported to wxPython by greg Landrum
# modified by Y. Wong
# modified by R. Amezquita
# modified by O. Olarte
from OpenGL.GL import *
from OpenGL.GLU import *
from wx import *
from wx.glcanvas import *
import math
import os,sys
def test_data(npoints):
    # A simple testing function that generates random triangles
    # npoints random points (x, y) in the plane
import numpy
import matplotlib.delaunay as triang
x,y = numpy.array(numpy.random.standard_normal((2,npoints)))
z = numpy.array(numpy.random.standard_normal(npoints))
points=[]
for i in range(npoints):
points.append((x[i],y[i],z[i]))
cens,edg,tri,neig = triang.delaunay(x,y)
return points, tri
def glTranslateScene(s, x, y, mousex, mousey):
glMatrixMode(GL_MODELVIEW)
mat = glGetDoublev(GL_MODELVIEW_MATRIX)
glLoadIdentity()
glTranslatef(s * (x - mousex), s * (mousey - y), 0.0)
glMultMatrixd(mat)
def glRotateScene(s, xcenter, ycenter, zcenter, x, y, mousex, mousey):
glMatrixMode(GL_MODELVIEW)
mat = glGetDoublev(GL_MODELVIEW_MATRIX)
glLoadIdentity()
glTranslatef(xcenter, ycenter, zcenter)
glRotatef(s * (y - mousey), 1., 0., 0.)
glRotatef(s * (x - mousex), 0., 1., 0.)
glTranslatef(-xcenter, -ycenter, -zcenter)
glMultMatrixd(mat)
def v3distsq(a,b):
d = ( a[0] - b[0], a[1] - b[1], a[2] - b[2] )
return d[0]*d[0] + d[1]*d[1] + d[2]*d[2]
# This code is needed to avoid faults on sys.exit()
import sys
oldexitfunc = None
if hasattr(sys, 'exitfunc'):
oldexitfunc = sys.exitfunc
def cleanup():
if oldexitfunc: oldexitfunc()
sys.exitfunc = cleanup
class wxGLWindow(GLCanvas):
"""Implements a simple wxPython OpenGL window.
This class provides a simple window, into which GL commands can be issued. This is done by overriding the built in functions InitGL(), DrawGL(), and FinishGL(). The main difference between it and the plain wxGLCanvas is that it copes with refreshing and resizing the window"""
def __init__(self, parent,*args,**kw):
self.GL_uninitialised = 1
apply(GLCanvas.__init__,(self, parent)+args, kw)
EVT_SIZE(self,self.wxSize)
EVT_PAINT(self,self.wxPaint)
EVT_ERASE_BACKGROUND(self, self.wxEraseBackground)
self.w, self.h = self.GetClientSizeTuple()
def __del__(self):
# self.SetCurrent()
self.FinishGL()
def InitGL(self):
"""OpenGL initialisation routine (to be overridden).
This routine, containing purely OpenGL commands, should be overridden by the user to set up the GL scene.
If it is not overridden, it defaults to setting an ambient light, setting the background colour to gray,
and enabling GL_DEPTH_TEST and GL_COLOR_MATERIAL."""
#set up lighting
glLightfv(GL_LIGHT0, GL_AMBIENT, [1.0, 1.0, 1.0, 1.0])
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glClearColor(0.7,0.7,0.7,0.0)
glShadeModel(GL_SMOOTH)
glDepthFunc(GL_LESS)
glEnable(GL_DEPTH_TEST)
glEnable(GL_COLOR_MATERIAL)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def FinishGL(self):
"""OpenGL closing routine (to be overridden).
This routine should be overridden if necessary by any OpenGL commands need to be specified when deleting the GLWindow (e.g. deleting Display Lists)."""
pass
def DrawGL(self):
"""OpenGL drawing routine (to be overridden).
This routine, containing purely OpenGL commands, should be overridden by the user to draw the GL scene.
If it is not overridden, it defaults to drawing a colour cube."""
#Draw colour cube
glBegin(GL_QUAD_STRIP)
glColor3f(1.0,1.0,1.0) #corner 1
glNormal3f(0.57735027, 0.57735027, 0.57735027)
glVertex3f(0.5, 0.5, 0.5)
glColor3f(1.0,0.0,1.0) #corner 2
glNormal3f(0.57735027, -0.57735027, 0.57735027)
glVertex3f(0.5, -0.5, 0.5)
glColor3f(1.0,1.0,0.0) #corner 3
glNormal3f(0.57735027, 0.57735027, -0.57735027)
glVertex3f(0.5, 0.5, -0.5)
glColor3f(1.0,0.0,0.0) #corner 4
glNormal3f(0.57735027, -0.57735027, -0.57735027)
glVertex3f(0.5, -0.5, -0.5)
glColor3f(0.0,1.0,0.0) #corner 5
glNormal3f(-0.57735027, 0.57735027, -0.57735027)
glVertex3f(-0.5, 0.5, -0.5)
glColor3f(0.0,0.0,0.0) #corner 6
glNormal3f(-0.57735027, -0.57735027, -0.57735027)
glVertex3f(-0.5, -0.5, -0.5)
glColor3f(0.0,1.0,1.0) #corner 7
glNormal3f(-0.57735027, 0.57735027, 0.57735027)
glVertex3f(-0.5, 0.5, 0.5)
glColor3f(0.0,0.0,1.0) #corner 8
glNormal3f(-0.57735027, -0.57735027, 0.57735027)
glVertex3f(-0.5, -0.5, 0.5)
glColor3f(1.0,1.0,1.0) #corner 1
glNormal3f(0.57735027, 0.57735027, 0.57735027)
glVertex3f(0.5, 0.5, 0.5)
glColor3f(1.0,0.0,1.0) #corner 2
glNormal3f(0.57735027, -0.57735027, 0.57735027)
glVertex3f(0.5, -0.5, 0.5)
glEnd()
glBegin(GL_QUADS)
glColor3f(1.0,1.0,1.0) #corner 1
glNormal3f(0.57735027, 0.57735027, 0.57735027)
glVertex3f(0.5, 0.5, 0.5)
glColor3f(1.0,1.0,0.0) #corner 3
glNormal3f(0.57735027, 0.57735027, -0.57735027)
glVertex3f(0.5, 0.5, -0.5)
glColor3f(0.0,1.0,0.0) #corner 5
glNormal3f(-0.57735027, 0.57735027, -0.57735027)
glVertex3f(-0.5, 0.5, -0.5)
glColor3f(0.0,1.0,1.0) #corner 7
glNormal3f(-0.57735027, 0.57735027, 0.57735027)
glVertex3f(-0.5, 0.5, 0.5)
glColor3f(1.0,0.0,1.0) #corner 2
glNormal3f(0.57735027, -0.57735027, 0.57735027)
glVertex3f(0.5, -0.5, 0.5)
glColor3f(1.0,0.0,0.0) #corner 4
glNormal3f(0.57735027, -0.57735027, -0.57735027)
glVertex3f(0.5, -0.5, -0.5)
glColor3f(0.0,0.0,0.0) #corner 6
glNormal3f(-0.57735027, -0.57735027, -0.57735027)
glVertex3f(-0.5, -0.5, -0.5)
glColor3f(0.0,0.0,1.0) #corner 8
glNormal3f(-0.57735027, -0.57735027, 0.57735027)
glVertex3f(-0.5, -0.5, 0.5)
glEnd()
def wxSize(self, event = None):
"""Called when the window is resized"""
self.w,self.h = self.GetClientSizeTuple()
if self.GetContext():
self.SetCurrent()
glViewport(0, 0, self.w, self.h)
self.Update()
def wxEraseBackground(self, event):
"""Routine does nothing, but prevents flashing"""
pass
def wxPaint(self, event=None):
"""Called on a paint event.
This sets the painting drawing context, then calls the base routine wxRedrawGL()"""
dc = PaintDC(self)
self.wxRedrawGL(event)
def wxRedraw(self, event=None):
"""Called on a redraw request
This sets the drawing context, then calls the base routine wxRedrawGL(). It can be called by the user when a refresh is needed"""
dc = ClientDC(self)
self.wxRedrawGL(event)
def wxRedrawGL(self, event=None):
"""This is the routine called when drawing actually takes place.
It needs to be separate so that it can be called by both paint events and by other events. It should not be called directly"""
self.SetCurrent()
if self.GL_uninitialised:
glViewport(0, 0, self.w, self.h)
self.InitGL()
self.GL_uninitialised=0
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
self.DrawGL() # Actually draw here
glPopMatrix();
glFlush() # Flush
self.SwapBuffers() # Swap buffers
if event: event.Skip() # Pass event up
self.Update()
class wxAdvancedGLWindow(wxGLWindow):
"""Implements a wxPython OpenGL window allowing spinning, zooming, etc.
This class is derived from wxGLWindow, and can be used in exactly the
same way, by overriding the functions InitGL(), FinishGL(), and DrawGL()
with functions containing OpenGL commands. The window captures mouse
events, and keypresses. You might want to override some of these
functions if you need more sophisticated control"""
def __init__(self, parent,*args,**kw):
if kw.has_key('autospin_allowed'):
# Is the widget allowed to autospin?
self.autospin_allowed = kw['autospin_allowed']
del kw['autospin_allowed']
else:
self.autospin_allowed = 0
apply(wxGLWindow.__init__,(self, parent)+args, kw)
# The _back color
self.r_back = 0.
self.g_back = 0.
self.b_back = 0.
# Where the eye is
#self.base_distance = self.distance = 10.0
self.base_distance = self.distance = 1000.0
# Field of view in y direction
self.fovy = 30.0
# Position of clipping planes.
self.near = 0.1
self.far = 10000.0
# Where we are centering.
self.xcenter = 0.0
self.ycenter = 0.0
self.zcenter = 0.0
self.parent = parent
# Current coordinates of the mouse.
self.xmouse = 0
self.ymouse = 0
self.xspin = 0
self.yspin = 0
# Is the widget currently autospinning?
self.autospin = 0
self.initLeft = (0,0)
EVT_SIZE(self,self.wxSize)
EVT_PAINT(self,self.wxPaint)
EVT_ERASE_BACKGROUND(self, self.wxEraseBackground)
EVT_CHAR(self,self.OnChar)
EVT_LEFT_DOWN(self,self.OnLeftClick)
EVT_LEFT_DCLICK(self,self.OnLeftDClick)
EVT_LEFT_UP(self,self.OnLeftUp)
EVT_MIDDLE_DOWN(self,self.OnMiddleClick)
EVT_RIGHT_DOWN(self,self.OnRightClick)
EVT_RIGHT_DCLICK(self,self.OnRightDClick)
EVT_MOTION(self,self.wxMouseMotion)
EVT_IDLE(self,self.wxIdle)
def wxIdle(self,event):
if self.autospin:
# self.do_AutoSpin(event) #doing it this way hogs the cpu
# event.RequestMore() #doing it this way hogs the cpu
WakeUpIdle()
self.do_AutoSpin(event)
event.Skip(1)
def OnChar(self,event):
key = event.GetKeyCode()
if key == ord('a'):
self.autospin_allowed = not self.autospin_allowed
if self.autospin:
self.autospin = 0
elif key == ord('q'):
self.parent.Destroy()
def OnLeftClick(self,event):
self.wxRecordMouse(event)
self.initLeft = event.GetX(),event.GetY()
def OnLeftDClick(self,event):
self.wxRecordMouse(event)
self.reset()
def OnLeftUp(self,event):
if not event.m_shiftDown:
self.wxAutoSpin(event)
def OnMiddleClick(self,event):
self.wxRecordMouse(event)
def OnRightClick(self,event):
self.wxRecordMouse(event)
def OnRightDClick(self,event):
self.wxRecordMouse(event)
self.distance=self.base_distance
self.wxRedraw()
def OnLeftDrag(self,event):
self.wxRotate(event)
def OnMiddleDrag(self,event):
self.wxTranslate(event)
def OnRightDrag(self,event):
self.wxScale(event)
def wxMouseMotion(self,event):
if not event.Dragging():
return
if event.LeftIsDown():
self.OnLeftDrag(event)
elif event.MiddleIsDown():
self.OnMiddleDrag(event)
elif event.RightIsDown():
self.OnRightDrag(event)
    def report_opengl_errors(self, message = "OpenGL error:"):
        """Report any OpenGL errors that occurred while drawing."""
while 1:
err_value = glGetError()
if not err_value: break
print message, gluErrorString(err_value)
def SetBgColour(self, r, g, b):
"""Change the background colour of the widget.
There seems to be a problem with this:"""
self.r_back = r
self.g_back = g
self.b_back = b
self.wxRedraw()
def SetCenterpoint(self, x, y, z):
"""Set the new center point for the model.
This is where we are looking."""
self.xcenter = x
self.ycenter = y
self.zcenter = z
self.wxRedraw()
def set_base_distance(self, distance):
"""Set how far the eye is from the position we are looking.
Sets the base distance, to which we are returned if we double click"""
self.base_distance = distance
def set_distance(self, distance):
"""Set how far the eye is from the position we are looking."""
self.distance = distance
self.wxRedraw()
def reset(self):
"""Reset rotation matrix for this widget."""
glMatrixMode(GL_MODELVIEW);
glLoadIdentity()
self.wxRedraw()
# def wxHandlePick(self, event):
# """Handle a pick on the scene."""
# pass
def wxRecordMouse(self, event):
"""Record the current mouse position."""
self.xmouse = event.GetX()
self.ymouse = event.GetY()
def wxStartRotate(self, event):
# Switch off any autospinning if it was happening
self.autospin = 0
self.wxRecordMouse(event)
def wxScale(self, event):
"""Scale the scene. Achieved by moving the eye position."""
scale = 1 - 0.01 * (event.GetY() - self.ymouse)
self.distance = self.distance * scale
self.wxRedraw()
self.wxRecordMouse(event)
def do_AutoSpin(self,event):
s = 0.5
glRotateScene(0.5,
self.xcenter, self.ycenter, self.zcenter,
self.yspin, self.xspin, 0, 0)
self.wxRedraw()
def wxAutoSpin(self, event):
"""Perform autospin of scene."""
if self.autospin_allowed:
self.autospin = 1
self.yspin = .1 * (event.GetX()-self.initLeft[0])
self.xspin = .1 * (event.GetY()-self.initLeft[1])
if self.xspin == 0 and self.yspin == 0:
self.autospin = 0
else:
self.do_AutoSpin(event)
def wxRotate(self, event):
"""Perform rotation of scene."""
if not event.m_shiftDown:
glRotateScene(0.5,
self.xcenter, self.ycenter, self.zcenter,
event.GetX(), event.GetY(), self.xmouse, self.ymouse)
else:
# rotate about z
sz = self.GetClientSizeTuple()
sz = (sz[0]/2, sz[1]/2)
xp = event.GetX()
yp = event.GetY()
dy = (self.ymouse-yp)
dx = (self.xmouse-xp)
if yp > sz[1]:
dx = dx * -1
if xp < sz[0]:
dy = dy * -1
d = dx + dy
glMatrixMode(GL_MODELVIEW);
m = glGetDouble(GL_MODELVIEW_MATRIX)
glLoadIdentity()
glTranslatef(self.xcenter,self.ycenter,self.zcenter)
glRotatef(.5*d,0,0,1.)
glTranslatef(-self.xcenter,-self.ycenter,-self.zcenter)
#glMultMatrixd(ravel(m)) #from Numeric...
glMultMatrixd(m)
self.wxRedraw()
self.wxRecordMouse(event)
def wxTranslate(self, event):
"""Perform translation of scene."""
# Scale mouse translations to object viewplane so object tracks with mouse
win_height = max( 1,self.w)
obj_c = (self.xcenter, self.ycenter, self.zcenter)
win = gluProject( obj_c[0], obj_c[1], obj_c[2] )
obj = gluUnProject( win[0], win[1] + 0.5 * win_height, win[2] )
dist = math.sqrt( v3distsq( obj, obj_c ) )
scale = abs( dist / ( 0.5 * win_height ) )
glTranslateScene(scale, event.GetX(), event.GetY(), self.xmouse, self.ymouse)
self.wxRedraw()
self.wxRecordMouse(event)
def wxRedrawGL(self, event=None):
"""Method used to actually draw the scene.
This is more complex than in the wxGLWindow class from which this
class is derived, as we need to do rotations, translations, etc."""
self.SetCurrent()
if self.GL_uninitialised:
glViewport(0, 0, self.w, self.h)
self.InitGL()
self.GL_uninitialised = 0
# Clear the background and depth buffer.
glClearColor(self.r_back, self.g_back, self.b_back, 0.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glMatrixMode(GL_PROJECTION);
glLoadIdentity()
gluPerspective(self.fovy, float(self.w)/float(self.h), self.near, self.far)
gluLookAt(self.xcenter, self.ycenter, self.zcenter + self.distance,
self.xcenter, self.ycenter, self.zcenter,
0., 1., 0.)
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
self.DrawGL() # Actually draw here
glPopMatrix();
glFlush() # Tidy up
self.SwapBuffers()
if event: event.Skip()
#-----------------------------------------------------
if __name__ == '__main__':
from OpenGL.GLUT import *
import array #for creating the texture map
class MyApp(App):
def OnInit(self):
frame = Frame(None, -1, "wxPython OpenGL example", DefaultPosition, Size(400,400))
#win1 = wxGLWindow(frame, -1, Point(5,5), Size(190,190))
#win2 = wxAdvancedGLWindow(frame, -1, Point(205,5), Size(190,190), autospin_allowed = 0)
win3 = MyWin1_1(frame, -1, Point(5,205), Size(190,190), autospin_allowed = 0)
#win4 = MyWin2(frame, -1, Point(205,205), Size(190,190))
# win1.SetScrollbars(0,0,0,0)
# win4.SetScrollbars(0,0,0,0)
# win3.SetBgColour(0.0,0.0,1.0)
frame.Show(True)
self.SetTopWindow(frame)
return True
class MyWin1(wxAdvancedGLWindow):
"""basic example of a wxAdvancedGLWindow"""
def DrawGL(self):
glColor3f(1.0,0.3,0.3)
glutSolidCone(1.0,2,20,16)
glRotatef(180.0,0.0,1.0,0.0)
glColor3f(0.3,1.0,0.3)
glutSolidCone(1.0,1,20,16)
glLoadIdentity()
class MyWin1_1(wxAdvancedGLWindow):
"""basic example of a wxAdvancedGLWindow with basic triangles"""
def InitGL(self):
self.points,self.polylist=test_data(1000)
print self.points, self.polylist
def DrawGL(self):
glColor4f(.1,.7,.7, 0.5)
for p in self.polylist:
if len(p)==3:
p0=self.points[p[0]]
p1=self.points[p[1]]
p2=self.points[p[2]]
glBegin(GL_TRIANGLES) #Drawing Using Triangles
glVertex3f( p0[0], p0[1], p0[2])
glVertex3f( p1[0], p1[1], p1[2])
glVertex3f( p2[0], p2[1], p2[2])
glEnd()
class MyWin2(wxAdvancedGLWindow):
"""example using display lists"""
def InitGL(self):
self.uninitialised = 1
glClearColor (0.0, 0.0, 0.0, 0.0);
glEnable(GL_DEPTH_TEST);
glShadeModel(GL_SMOOTH);
self.stripeImageWidth=32
temp = array.array('B')
for x in range(5):
temp.fromlist([255,0,0,255])
for x in range(self.stripeImageWidth-5):
temp.fromlist([0,255,0,255])
self.stripeImage = temp.tostring()
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
self.texName=glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, self.texName)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, self.stripeImageWidth,1,0,
GL_RGBA, GL_UNSIGNED_BYTE, [self.stripeImage])
glTexImage2D(GL_TEXTURE_2D, 0, 4, self.stripeImageWidth, 1, 0,
GL_RGBA, GL_UNSIGNED_BYTE, [self.stripeImage])
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE)
glTexGeni(GL_S, GL_TEXTURE_GEN_MODE, GL_EYE_LINEAR)
glTexGenfv(GL_S, GL_EYE_PLANE, [1.0, 1.0, 1.0, 0.0])
glEnable(GL_TEXTURE_GEN_S);
glEnable(GL_TEXTURE_2D);
glEnable(GL_CULL_FACE);
glEnable(GL_LIGHTING);
glEnable(GL_LIGHT0);
glEnable(GL_AUTO_NORMAL);
glEnable(GL_NORMALIZE);
glFrontFace(GL_CW);
glCullFace(GL_BACK);
glMaterialf (GL_FRONT, GL_SHININESS, 64.0);
self.DispList=glGenLists(1)
def DrawGL(self):
if self.uninitialised:
glNewList(self.DispList, GL_COMPILE)
glRotatef(45.0, 0.0, 0.0, 1.0);
glBindTexture(GL_TEXTURE_2D, self.texName);
glutSolidTeapot(2.0);
glEndList()
self.uninitialised = 0
glCallList(self.DispList)
def FinishGL(self):
if self.DispList:
glDeleteLists(self.DispList)
app = MyApp(0)
app.MainLoop()
| bsd-3-clause |
smblance/ggplot | ggplot/tests/test_legend.py | 12 | 5181 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import get_assert_same_ggplot, cleanup, assert_same_elements
assert_same_ggplot = get_assert_same_ggplot(__file__)
from nose.tools import (assert_true, assert_raises, assert_is,
assert_is_not, assert_equal)
from ggplot import *
import six
import pandas as pd
from ggplot.components import assign_visual_mapping
from ggplot.utils.exceptions import GgplotError
def get_test_df():
df = pd.DataFrame({
'xmin': [1, 3, 5],
'xmax': [2, 3.5, 7],
'ymin': [1, 4, 6],
'ymax': [5, 5, 9],
'fill': ['blue', 'red', 'green'],
'quality': ['good', 'bad', 'ugly'],
'alpha': [0.1, 0.5, 0.9],
'texture': ['hard', 'soft', 'medium']})
return df
def test_legend_structure():
df = get_test_df()
gg = ggplot(df, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
colour='quality', fill='fill', alpha='alpha',
linetype='texture'))
new_df, legend = assign_visual_mapping(df, gg.aesthetics, gg)
# All mapped aesthetics must have an entry in the legend
for aesthetic in ('color', 'fill', 'alpha', 'linetype'):
assert(aesthetic in legend)
# None of the unassigned aesthetic should have an entry in the legend
assert('size' not in legend)
assert('shape' not in legend)
# legend entries should remember the column names
# to which they were mapped
assert(legend['fill']['column_name'] == 'fill')
assert(legend['color']['column_name'] == 'quality')
assert(legend['linetype']['column_name'] == 'texture')
assert(legend['alpha']['column_name'] == 'alpha')
# Discrete columns for non-numeric data
assert(legend['fill']['scale_type'] == 'discrete')
assert(legend['color']['scale_type'] == 'discrete')
assert(legend['linetype']['scale_type'] == 'discrete')
assert(legend['alpha']['scale_type'] == 'discrete')
# Alternate
df2 = pd.DataFrame.copy(df)
df2['fill'] = [90, 3.2, 8.1]
gg = ggplot(df2, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
colour='quality', fill='fill', alpha='alpha',
linetype='texture'))
new_df, legend = assign_visual_mapping(df2, gg.aesthetics, gg)
assert(legend['fill']['scale_type'] == 'discrete')
# Test if legend switches to continuous for more than 8 numerical values
df3 = pd.DataFrame({
'xmin': [1, 3, 5, 8, 2, 1, 4, 7, 9],
'xmax': [2, 3.5, 7, 12, 3, 2, 6, 8, 10],
'ymin': [1, 4, 6, 0, 0, 0, 0, 0, 0],
'ymax': [5, 5, 9, 1, 1, 1, 1, 1, 1],
'fill': ['blue', 'red', 'green', 'green', 'green',
'green', 'green', 'green', 'brown'],
'quality': ['good', 'bad', 'ugly', 'horrible', 'quite awful',
'impertinent', 'jolly', 'hazardous', 'ok'],
'alpha': [0.1, 0.2, 0.4, 0.5, 0.6, 0.65, 0.8, 0.82, 0.83],
'texture': ['hard', 'soft', 'medium', 'fluffy', 'slimy', 'rough',
'edgy', 'corny', 'slanted']
})
gg = ggplot(df2, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
colour='quality', fill='fill', alpha='alpha',
linetype='texture'))
new_df, legend = assign_visual_mapping(df3, gg.aesthetics, gg)
assert(legend['alpha']['scale_type'] == 'continuous')
    # Test if legend raises GgplotError when size and alpha are fed non-numeric data
gg = ggplot(df3, aes(size="fill"))
assert_raises(GgplotError, assign_visual_mapping, df3, gg.aesthetics, gg)
gg = ggplot(df3, aes(alpha="fill"))
assert_raises(GgplotError, assign_visual_mapping, df3, gg.aesthetics, gg)
@cleanup
def test_alpha_rect():
df = get_test_df()
p = ggplot(df, aes(xmin='xmin', xmax='xmax', ymin='ymin', ymax='ymax',
colour='quality', fill='fill', alpha='alpha',
linetype='texture'))
p += geom_rect(size=5)
assert_same_ggplot(p, "legend_alpha_rect")
@cleanup
def test_alpha():
diamonds["test"] = diamonds["clarity"].map(len)
p = ggplot(diamonds[::50], aes(x='carat', y='price', colour='test',
size='test', alpha='test'))
#p = ggplot(diamonds[1:60000:50], aes(x='carat', y='price', shape='clarity'))
p = p + geom_point() + ggtitle("Diamonds: A Plot")
p = p + xlab("Carat") + ylab("Price")
assert_same_ggplot(p, "legend_alpha")
@cleanup
def test_linetype():
meat_lng = pd.melt(meat[['date', 'beef', 'pork', 'broilers']], id_vars='date')
p = ggplot(aes(x='date', y='value', colour='variable',
linetype='variable', shape='variable'), data=meat_lng) + \
geom_line() + geom_point() +\
ylim(0, 3000)
assert_same_ggplot(p, "legend_linetype")
@cleanup
def test_shape_alpha():
diamonds["test"] = diamonds["clarity"].map(len)
df = diamonds[::50]
p = ggplot(df, aes(x='carat', y='price', colour='test', size='test',
alpha='test', shape='clarity')) + geom_point()
assert_same_ggplot(p, "legend_shape_alpha")
| bsd-2-clause |
xuanyuanking/spark | python/pyspark/pandas/usage_logging/usage_logger.py | 14 | 4949 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The reference implementation of usage logger using the Python standard logging library.
"""
from inspect import Signature
import logging
from typing import Any, Optional
def get_logger() -> Any:
"""An entry point of the plug-in and return the usage logger."""
return PandasOnSparkUsageLogger()
def _format_signature(signature):
return (
"({})".format(", ".join([p.name for p in signature.parameters.values()]))
if signature is not None
else ""
)
class PandasOnSparkUsageLogger(object):
"""
The reference implementation of usage logger.
The usage logger needs to provide the following methods:
- log_success(self, class_name, name, duration, signature=None)
- log_failure(self, class_name, name, ex, duration, signature=None)
- log_missing(self, class_name, name, is_deprecated=False, signature=None)
"""
def __init__(self):
self.logger = logging.getLogger("pyspark.pandas.usage_logger")
def log_success(
self, class_name: str, name: str, duration: float, signature: Optional[Signature] = None
) -> None:
"""
Log the function or property call is successfully finished.
:param class_name: the target class name
:param name: the target function or property name
:param duration: the duration to finish the function or property call
:param signature: the signature if the target is a function, else None
"""
if self.logger.isEnabledFor(logging.INFO):
msg = (
"A {function} `{class_name}.{name}{signature}` was successfully finished "
"after {duration:.3f} ms."
).format(
class_name=class_name,
name=name,
signature=_format_signature(signature),
duration=duration * 1000,
function="function" if signature is not None else "property",
)
self.logger.info(msg)
def log_failure(
self,
class_name: str,
name: str,
ex: Exception,
duration: float,
signature: Optional[Signature] = None,
) -> None:
"""
Log the function or property call failed.
:param class_name: the target class name
:param name: the target function or property name
:param ex: the exception causing the failure
:param duration: the duration until the function or property call fails
:param signature: the signature if the target is a function, else None
"""
if self.logger.isEnabledFor(logging.WARNING):
msg = (
"A {function} `{class_name}.{name}{signature}` was failed "
"after {duration:.3f} ms: {msg}"
).format(
class_name=class_name,
name=name,
signature=_format_signature(signature),
msg=str(ex),
duration=duration * 1000,
function="function" if signature is not None else "property",
)
self.logger.warning(msg)
def log_missing(
self,
class_name: str,
name: str,
is_deprecated: bool = False,
signature: Optional[Signature] = None,
) -> None:
"""
Log the missing or deprecated function or property is called.
:param class_name: the target class name
:param name: the target function or property name
:param is_deprecated: True if the function or property is marked as deprecated
:param signature: the original function signature if the target is a function, else None
"""
if self.logger.isEnabledFor(logging.INFO):
msg = "A {deprecated} {function} `{class_name}.{name}{signature}` was called.".format(
class_name=class_name,
name=name,
signature=_format_signature(signature),
function="function" if signature is not None else "property",
deprecated="deprecated" if is_deprecated else "missing",
)
self.logger.info(msg)
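# --- hedged usage sketch (not part of the upstream module) ---
# Shows, in isolation, what this reference logger emits.  In normal use the
# plug-in is wired in by pyspark.pandas itself; here the hooks are called by
# hand with made-up class/function names ("DataFrame", "quantile") purely so
# the formatted messages can be inspected on stdout.
if __name__ == "__main__":
    import inspect
    import sys

    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    usage_logger = get_logger()

    # hypothetical "(col, q)" signature built from a throwaway lambda
    fake_signature = inspect.signature(lambda col, q: None)
    usage_logger.log_success("DataFrame", "quantile", 0.012, fake_signature)
    usage_logger.log_missing("DataFrame", "quantile", is_deprecated=True,
                             signature=fake_signature)
    try:
        raise ValueError("boom")
    except ValueError as exc:
        usage_logger.log_failure("DataFrame", "quantile", exc, 0.003,
                                 fake_signature)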
| apache-2.0 |
nicklhy/mxnet | example/rcnn/rcnn/core/tester.py | 25 | 10193 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import cPickle
import os
import time
import mxnet as mx
import numpy as np
from module import MutableModule
from rcnn.logger import logger
from rcnn.config import config
from rcnn.io import image
from rcnn.processing.bbox_transform import bbox_pred, clip_boxes
from rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper
class Predictor(object):
def __init__(self, symbol, data_names, label_names,
context=mx.cpu(), max_data_shapes=None,
provide_data=None, provide_label=None,
arg_params=None, aux_params=None):
self._mod = MutableModule(symbol, data_names, label_names,
context=context, max_data_shapes=max_data_shapes)
self._mod.bind(provide_data, provide_label, for_training=False)
self._mod.init_params(arg_params=arg_params, aux_params=aux_params)
def predict(self, data_batch):
self._mod.forward(data_batch)
return dict(zip(self._mod.output_names, self._mod.get_outputs()))
def im_proposal(predictor, data_batch, data_names, scale):
data_dict = dict(zip(data_names, data_batch.data))
output = predictor.predict(data_batch)
# drop the batch index
boxes = output['rois_output'].asnumpy()[:, 1:]
scores = output['rois_score'].asnumpy()
# transform to original scale
boxes = boxes / scale
return scores, boxes, data_dict
def generate_proposals(predictor, test_data, imdb, vis=False, thresh=0.):
"""
    Generate detection results using RPN.
:param predictor: Predictor
:param test_data: data iterator, must be non-shuffled
:param imdb: image database
:param vis: controls visualization
:param thresh: thresh for valid detections
:return: list of detected boxes
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
i = 0
t = time.time()
imdb_boxes = list()
original_boxes = list()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[0, 2]
scores, boxes, data_dict = im_proposal(predictor, data_batch, data_names, scale)
t2 = time.time() - t
t = time.time()
# assemble proposals
dets = np.hstack((boxes, scores))
original_boxes.append(dets)
# filter proposals
keep = np.where(dets[:, 4:] > thresh)[0]
dets = dets[keep, :]
imdb_boxes.append(dets)
if vis:
vis_all_detection(data_dict['data'].asnumpy(), [dets], ['obj'], scale)
logger.info('generating %d/%d ' % (i + 1, imdb.num_images) +
'proposal %d ' % (dets.shape[0]) +
'data %.4fs net %.4fs' % (t1, t2))
i += 1
assert len(imdb_boxes) == imdb.num_images, 'calculations not complete'
# save results
rpn_folder = os.path.join(imdb.root_path, 'rpn_data')
if not os.path.exists(rpn_folder):
os.mkdir(rpn_folder)
rpn_file = os.path.join(rpn_folder, imdb.name + '_rpn.pkl')
with open(rpn_file, 'wb') as f:
cPickle.dump(imdb_boxes, f, cPickle.HIGHEST_PROTOCOL)
if thresh > 0:
full_rpn_file = os.path.join(rpn_folder, imdb.name + '_full_rpn.pkl')
with open(full_rpn_file, 'wb') as f:
cPickle.dump(original_boxes, f, cPickle.HIGHEST_PROTOCOL)
logger.info('wrote rpn proposals to %s' % rpn_file)
return imdb_boxes
def im_detect(predictor, data_batch, data_names, scale):
output = predictor.predict(data_batch)
data_dict = dict(zip(data_names, data_batch.data))
if config.TEST.HAS_RPN:
rois = output['rois_output'].asnumpy()[:, 1:]
else:
rois = data_dict['rois'].asnumpy().reshape((-1, 5))[:, 1:]
im_shape = data_dict['data'].shape
# save output
scores = output['cls_prob_reshape_output'].asnumpy()[0]
bbox_deltas = output['bbox_pred_reshape_output'].asnumpy()[0]
# post processing
pred_boxes = bbox_pred(rois, bbox_deltas)
pred_boxes = clip_boxes(pred_boxes, im_shape[-2:])
# we used scaled image & roi to train, so it is necessary to transform them back
pred_boxes = pred_boxes / scale
return scores, pred_boxes, data_dict
def pred_eval(predictor, test_data, imdb, vis=False, thresh=1e-3):
"""
wrapper for calculating offline validation for faster data analysis
in this example, all threshold are set by hand
:param predictor: Predictor
    :param test_data: data iterator, must be non-shuffled
:param imdb: image database
:param vis: controls visualization
:param thresh: valid detection threshold
:return:
"""
assert vis or not test_data.shuffle
data_names = [k[0] for k in test_data.provide_data]
nms = py_nms_wrapper(config.TEST.NMS)
# limit detections to max_per_image over all classes
max_per_image = -1
num_images = imdb.num_images
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(imdb.num_classes)]
i = 0
t = time.time()
for im_info, data_batch in test_data:
t1 = time.time() - t
t = time.time()
scale = im_info[0, 2]
scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, scale)
t2 = time.time() - t
t = time.time()
for j in range(1, imdb.num_classes):
indexes = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[indexes, j, np.newaxis]
cls_boxes = boxes[indexes, j * 4:(j + 1) * 4]
cls_dets = np.hstack((cls_boxes, cls_scores))
keep = nms(cls_dets)
all_boxes[j][i] = cls_dets[keep, :]
if max_per_image > 0:
image_scores = np.hstack([all_boxes[j][i][:, -1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
if vis:
boxes_this_image = [[]] + [all_boxes[j][i] for j in range(1, imdb.num_classes)]
vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, imdb.classes, scale)
t3 = time.time() - t
t = time.time()
logger.info('testing %d/%d data %.4fs net %.4fs post %.4fs' % (i, imdb.num_images, t1, t2, t3))
i += 1
det_file = os.path.join(imdb.cache_path, imdb.name + '_detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, protocol=cPickle.HIGHEST_PROTOCOL)
imdb.evaluate_detections(all_boxes)
def vis_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import matplotlib.pyplot as plt
import random
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
plt.imshow(im)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.random(), random.random(), random.random()) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor=color, linewidth=3.5)
plt.gca().add_patch(rect)
plt.gca().text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(name, score),
bbox=dict(facecolor=color, alpha=0.5), fontsize=12, color='white')
plt.show()
def draw_all_detection(im_array, detections, class_names, scale):
"""
visualize all detections in one image
:param im_array: [b=1 c h w] in rgb
:param detections: [ numpy.ndarray([[x1 y1 x2 y2 score]]) for j in classes ]
:param class_names: list of names in imdb
:param scale: visualize the scaled image
:return:
"""
import cv2
import random
color_white = (255, 255, 255)
im = image.transform_inverse(im_array, config.PIXEL_MEANS)
# change to bgr
im = cv2.cvtColor(im, cv2.cv.CV_RGB2BGR)
for j, name in enumerate(class_names):
if name == '__background__':
continue
color = (random.randint(0, 256), random.randint(0, 256), random.randint(0, 256)) # generate a random color
dets = detections[j]
for det in dets:
bbox = det[:4] * scale
score = det[-1]
bbox = map(int, bbox)
cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), color=color, thickness=2)
cv2.putText(im, '%s %.3f' % (class_names[j], score), (bbox[0], bbox[1] + 10),
color=color_white, fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5)
return im
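# --- hedged usage sketch (not part of the original module) ---
# A tiny illustration of the post-processing chain used by im_detect() and
# pred_eval(): regress proposals with bbox_pred, clip them to the image, and
# suppress duplicates with the configured NMS.  The rois, deltas and scores
# below are synthetic numbers, not real network output, and assume a single
# foreground class next to the background class.
if __name__ == '__main__':
    rois = np.array([[10., 10., 50., 60.],
                     [12., 11., 52., 58.]])
    deltas = np.zeros((2, 8))            # background + 1 class -> 8 columns
    scores = np.array([[0.1, 0.9],
                       [0.2, 0.8]])
    pred_boxes = bbox_pred(rois, deltas)             # (2, 8) regressed boxes
    pred_boxes = clip_boxes(pred_boxes, (100, 100))  # clip to a 100x100 image
    cls_dets = np.hstack((pred_boxes[:, 4:8], scores[:, 1:2]))
    keep = py_nms_wrapper(config.TEST.NMS)(cls_dets)
    logger.info('kept %d of %d synthetic detections after NMS',
                len(keep), cls_dets.shape[0])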
| apache-2.0 |
skoslowski/gnuradio | gnuradio-runtime/examples/volk_benchmark/volk_plot.py | 4 | 6303 | #!/usr/bin/env python
from __future__ import division
from __future__ import unicode_literals
import sys, math
import argparse
from volk_test_funcs import (create_connection, list_tables, get_results,
helper, timeit, format_results)
try:
import matplotlib
import matplotlib.pyplot as plt
except ImportError:
sys.stderr.write("Could not import Matplotlib (http://matplotlib.sourceforge.net/)\n")
sys.exit(1)
def main():
desc='Plot Volk performance results from a SQLite database. ' + \
'Run one of the volk tests first (e.g, volk_math.py)'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-D', '--database', type=str,
default='volk_results.db',
help='Database file to read data from [default: %(default)s]')
parser.add_argument('-E', '--errorbars',
action='store_true', default=False,
help='Show error bars (1 standard dev.)')
parser.add_argument('-P', '--plot', type=str,
choices=['mean', 'min', 'max'],
default='mean',
help='Set the type of plot to produce [default: %(default)s]')
parser.add_argument('-%', '--percent', type=str,
default=None, metavar="table",
help='Show percent difference to the given type [default: %(default)s]')
args = parser.parse_args()
# Set up global plotting properties
matplotlib.rcParams['figure.subplot.bottom'] = 0.2
matplotlib.rcParams['figure.subplot.top'] = 0.95
matplotlib.rcParams['figure.subplot.right'] = 0.98
matplotlib.rcParams['ytick.labelsize'] = 16
matplotlib.rcParams['xtick.labelsize'] = 16
matplotlib.rcParams['legend.fontsize'] = 18
# Get list of tables to compare
conn = create_connection(args.database)
tables = list_tables(conn)
M = len(tables)
# Colors to distinguish each table in the bar graph
# More than 5 tables will wrap around to the start.
colors = ['b', 'r', 'g', 'm', 'k']
# Set up figure for plotting
f0 = plt.figure(0, facecolor='w', figsize=(14,10))
s0 = f0.add_subplot(1,1,1)
# Create a register of names that exist in all tables
tmp_regs = []
for table in tables:
# Get results from the next table
res = get_results(conn, table[0])
tmp_regs.append(list())
for r in res:
try:
tmp_regs[-1].index(r['kernel'])
except ValueError:
tmp_regs[-1].append(r['kernel'])
# Get only those names that are common in all tables
name_reg = tmp_regs[0]
for t in tmp_regs[1:]:
name_reg = list(set(name_reg) & set(t))
name_reg.sort()
# Pull the data out for each table into a dictionary
# we can ref the table by it's name and the data associated
# with a given kernel in name_reg by it's name.
# This ensures there is no sorting issue with the data in the
# dictionary, so the kernels are plotted against each other.
table_data = dict()
for i,table in enumerate(tables):
# Get results from the next table
res = get_results(conn, table[0])
data = dict()
for r in res:
data[r['kernel']] = r
table_data[table[0]] = data
if args.percent is not None:
for i,t in enumerate(table_data):
if args.percent == t:
norm_data = []
for name in name_reg:
if(args.plot == 'max'):
norm_data.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
norm_data.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
norm_data.append(table_data[t][name]['avg'])
# Plot the results
x0 = list(range(len(name_reg)))
i = 0
for t in (table_data):
ydata = []
stds = []
for name in name_reg:
stds.append(math.sqrt(table_data[t][name]['var']))
if(args.plot == 'max'):
ydata.append(table_data[t][name]['max'])
elif(args.plot == 'min'):
ydata.append(table_data[t][name]['min'])
elif(args.plot == 'mean'):
ydata.append(table_data[t][name]['avg'])
if args.percent is not None:
ydata = [-100*(y-n)/y for y,n in zip(ydata,norm_data)]
if(args.percent != t):
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80 / (M-1)
x1 = [x + i*wdth for x in x0]
i += 1
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
# makes x values for this data set placement
# width of bars depends on number of comparisons
wdth = 0.80 / M
x1 = [x + i*wdth for x in x0]
i += 1
if(args.errorbars is False):
s0.bar(x1, ydata, width=wdth,
color=colors[(i-1)%M], label=t,
edgecolor='k', linewidth=2)
else:
s0.bar(x1, ydata, width=wdth,
yerr=stds,
color=colors[i%M], label=t,
edgecolor='k', linewidth=2,
error_kw={"ecolor": 'k', "capsize":5,
"linewidth":2})
nitems = res[0]['nitems']
if args.percent is None:
s0.set_ylabel("Processing time (sec) [{0:G} items]".format(nitems),
fontsize=22, fontweight='bold',
horizontalalignment='center')
else:
s0.set_ylabel("% Improvement over {0} [{1:G} items]".format(
args.percent, nitems),
fontsize=22, fontweight='bold')
s0.legend()
s0.set_xticks(x0)
s0.set_xticklabels(name_reg)
for label in s0.xaxis.get_ticklabels():
label.set_rotation(45)
label.set_fontsize(16)
plt.show()
if __name__ == "__main__":
main()
| gpl-3.0 |
elijah513/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
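# Note: nu upper-bounds the fraction of training errors of the One-Class SVM, so it is
# tied to the (known) outliers_fraction, while EllipticEnvelope takes a contamination level directly.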
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
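        # The contamination is known, so instead of calling clf.predict we flag as outliers
        # the outliers_fraction of samples with the lowest decision_function values.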
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
blondegeek/pymatgen | pymatgen/io/abinit/works.py | 2 | 74708 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Works for Abinit
"""
import os
import shutil
import time
import abc
import collections
import numpy as np
import copy
from monty.collections import AttrDict
from monty.itertools import chunks
from monty.functools import lazy_property
from monty.fnmatch import WildCard
from pydispatch import dispatcher
from pymatgen.core.units import EnergyArray
from . import wrappers
from .nodes import Dependency, Node, NodeError, NodeResults, FileNode, check_spectator
from .tasks import (Task, AbinitTask, ScfTask, NscfTask, DfptTask, PhononTask, ElasticTask, DdkTask,
BseTask, RelaxTask, DdeTask, BecTask, ScrTask, SigmaTask, TaskManager,
DteTask, EphTask, CollinearThenNonCollinearScfTask)
from .utils import Directory
from .netcdf import ETSF_Reader, NetcdfReader
from .abitimer import AbinitTimerParser
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"Work",
"BandStructureWork",
"RelaxWork",
"G0W0Work",
"QptdmWork",
"SigmaConvWork",
"BseMdfWork",
"PhononWork",
"PhononWfkqWork",
"GKKPWork",
"BecWork",
"DteWork",
]
class WorkResults(NodeResults):
JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
@classmethod
def from_node(cls, work):
"""Initialize an instance from a :class:`Work` instance."""
new = super().from_node(work)
# Will put all files found in outdir in GridFs
# Warning: assuming binary files.
d = {os.path.basename(f): f for f in work.outdir.list_filepaths()}
new.register_gridfs_files(**d)
return new
class WorkError(NodeError):
"""Base class for the exceptions raised by Work objects."""
class BaseWork(Node, metaclass=abc.ABCMeta):
Error = WorkError
Results = WorkResults
# interface modeled after subprocess.Popen
@property
@abc.abstractmethod
def processes(self):
"""Return a list of objects that support the `subprocess.Popen` protocol."""
def poll(self):
"""
Check if all child processes have terminated. Set and return returncode attribute.
"""
return [task.poll() for task in self]
def wait(self):
"""
Wait for child processed to terminate. Set and return returncode attribute.
"""
return [task.wait() for task in self]
def communicate(self, input=None):
"""
Interact with processes: Send data to stdin. Read data from stdout and
stderr, until end-of-file is reached.
Wait for process to terminate. The optional input argument should be a
string to be sent to the child processed, or None, if no data should be
sent to the children.
communicate() returns a list of tuples (stdoutdata, stderrdata).
"""
return [task.communicate(input) for task in self]
@property
def returncodes(self):
"""
The children return codes, set by poll() and wait() (and indirectly by communicate()).
A None value indicates that the process hasn't terminated yet.
A negative value -N indicates that the child was terminated by signal N (Unix only).
"""
return [task.returncode for task in self]
@property
def ncores_reserved(self):
"""
Returns the number of cores reserved in this moment.
A core is reserved if it's still not running but
we have submitted the task to the queue manager.
"""
return sum(task.manager.num_cores for task in self if task.status == task.S_SUB)
@property
def ncores_allocated(self):
"""
Returns the number of CPUs allocated in this moment.
A core is allocated if it's running a task or if we have
submitted a task to the queue manager but the job is still pending.
"""
return sum(task.manager.num_cores for task in self if task.status in [task.S_SUB, task.S_RUN])
@property
def ncores_used(self):
"""
Returns the number of cores used in this moment.
A core is used if there's a job that is running on it.
"""
return sum(task.manager.num_cores for task in self if task.status == task.S_RUN)
def fetch_task_to_run(self):
"""
Returns the first task that is ready to run or
        None if no task can be submitted at present.
Raises:
`StopIteration` if all tasks are done.
"""
# All the tasks are done so raise an exception
# that will be handled by the client code.
if all(task.is_completed for task in self):
raise StopIteration("All tasks completed.")
for task in self:
if task.can_run:
return task
# No task found, this usually happens when we have dependencies.
# Beware of possible deadlocks here!
logger.warning("Possible deadlock in fetch_task_to_run!")
return None
def fetch_alltasks_to_run(self):
"""
Returns a list with all the tasks that can be submitted.
        Empty list if no task has been found.
"""
return [task for task in self if task.can_run]
@abc.abstractmethod
def setup(self, *args, **kwargs):
"""Method called before submitting the calculations."""
def _setup(self, *args, **kwargs):
self.setup(*args, **kwargs)
def connect_signals(self):
"""
Connect the signals within the work.
The :class:`Work` is responsible for catching the important signals raised from
        its tasks and raising new signals when some particular condition occurs.
"""
for task in self:
dispatcher.connect(self.on_ok, signal=task.S_OK, sender=task)
def disconnect_signals(self):
"""
Disable the signals within the work. This function reverses the process of `connect_signals`
"""
for task in self:
try:
dispatcher.disconnect(self.on_ok, signal=task.S_OK, sender=task)
except dispatcher.errors.DispatcherKeyError as exc:
logger.debug(str(exc))
@property
def all_ok(self):
return all(task.status == task.S_OK for task in self)
#@check_spectator
def on_ok(self, sender):
"""
This callback is called when one task reaches status `S_OK`.
It executes on_all_ok when all tasks in self have reached `S_OK`.
"""
logger.debug("in on_ok with sender %s" % sender)
if self.all_ok:
if self.finalized:
return AttrDict(returncode=0, message="Work has been already finalized")
else:
# Set finalized here, because on_all_ok might change it (e.g. Relax + EOS in a single work)
self.finalized = True
try:
results = AttrDict(**self.on_all_ok())
except Exception as exc:
self.history.critical("on_all_ok raises %s" % str(exc))
self.finalized = False
raise
# Signal to possible observers that the `Work` reached S_OK
self.history.info("Work %s is finalized and broadcasts signal S_OK" % str(self))
if self._finalized:
self.send_signal(self.S_OK)
return results
return AttrDict(returncode=1, message="Not all tasks are OK!")
#@check_spectator
def on_all_ok(self):
"""
This method is called once the `Work` is completed i.e. when all tasks
have reached status S_OK. Subclasses should provide their own implementation
Returns:
Dictionary that must contain at least the following entries:
returncode:
0 on success.
message:
a string that should provide a human-readable description of what has been performed.
"""
return dict(returncode=0, message="Calling on_all_ok of the base class!")
def get_results(self, **kwargs):
"""
Method called once the calculations are completed.
The base version returns a dictionary task_name: TaskResults for each task in self.
"""
results = self.Results.from_node(self)
return results
def get_graphviz(self, engine="automatic", graph_attr=None, node_attr=None, edge_attr=None):
"""
Generate task graph in the DOT language (only parents and children of this work).
Args:
engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage']
graph_attr: Mapping of (attribute, value) pairs for the graph.
node_attr: Mapping of (attribute, value) pairs set for all nodes.
edge_attr: Mapping of (attribute, value) pairs set for all edges.
Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
"""
from graphviz import Digraph
fg = Digraph("work", #filename="work_%s.gv" % os.path.basename(self.workdir),
engine="fdp" if engine == "automatic" else engine)
# Set graph attributes.
# https://www.graphviz.org/doc/info/
#fg.attr(label="%s@%s" % (self.__class__.__name__, self.relworkdir))
fg.attr(label=repr(self))
#fg.attr(fontcolor="white", bgcolor='purple:pink')
fg.attr(rankdir="LR", pagedir="BL")
#fg.attr(constraint="false", pack="true", packMode="clust")
fg.node_attr.update(color='lightblue2', style='filled')
#fg.node_attr.update(ranksep='equally')
# Add input attributes.
if graph_attr is not None:
fg.graph_attr.update(**graph_attr)
if node_attr is not None:
fg.node_attr.update(**node_attr)
if edge_attr is not None:
fg.edge_attr.update(**edge_attr)
def node_kwargs(node):
return dict(
#shape="circle",
color=node.color_hex,
label=(str(node) if not hasattr(node, "pos_str") else
node.pos_str + "\n" + node.__class__.__name__),
)
edge_kwargs = dict(arrowType="vee", style="solid")
cluster_kwargs = dict(rankdir="LR", pagedir="BL", style="rounded", bgcolor="azure2")
# Build cluster with tasks in *this* work
cluster_name = "cluster%s" % self.name
with fg.subgraph(name=cluster_name) as wg:
wg.attr(**cluster_kwargs)
wg.attr(label="%s (%s)" % (self.__class__.__name__, self.name))
for task in self:
wg.node(task.name, **node_kwargs(task))
# Connect task to children
for child in task.get_children():
# Test if child is in this cluster (self).
myg = wg if child in self else fg
myg.node(child.name, **node_kwargs(child))
# Find file extensions required by this task
i = [dep.node for dep in child.deps].index(task)
edge_label = "+".join(child.deps[i].exts)
myg.edge(task.name, child.name, label=edge_label, color=task.color_hex,
**edge_kwargs)
# Connect task to parents
for parent in task.get_parents():
# Test if parent is in this cluster (self).
myg = wg if parent in self else fg
myg.node(parent.name, **node_kwargs(parent))
# Find file extensions required by this task
i = [dep.node for dep in task.deps].index(parent)
edge_label = "+".join(task.deps[i].exts)
myg.edge(parent.name, task.name, label=edge_label, color=parent.color_hex,
**edge_kwargs)
# Treat the case in which we have a work producing output for tasks in *this* work.
#for work in self.flow:
# children = work.get_children()
# if not children or all(child not in self for child in children):
# continue
# cluster_name = "cluster%s" % work.name
# seen = set()
# for child in children:
# if child not in self: continue
# # This is not needed, too much confusing
# #fg.edge(cluster_name, child.name, color=work.color_hex, **edge_kwargs)
# # Find file extensions required by work
# i = [dep.node for dep in child.deps].index(work)
# for ext in child.deps[i].exts:
# out = "%s (%s)" % (ext, work.name)
# fg.node(out)
# fg.edge(out, child.name, **edge_kwargs)
# key = (cluster_name, out)
# if key not in seen:
# fg.edge(cluster_name, out, color=work.color_hex, **edge_kwargs)
# seen.add(key)
return fg
class NodeContainer(metaclass=abc.ABCMeta):
"""
Mixin classes for `Work` and `Flow` objects providing helper functions
to register tasks in the container. The helper function call the
`register` method of the container.
"""
# TODO: Abstract protocol for containers
@abc.abstractmethod
def register_task(self, *args, **kwargs):
"""
Register a task in the container.
"""
# TODO: shall flow.register_task return a Task or a Work?
# Helper functions
def register_scf_task(self, *args, **kwargs):
"""Register a Scf task."""
kwargs["task_class"] = ScfTask
return self.register_task(*args, **kwargs)
def register_collinear_then_noncollinear_scf_task(self, *args, **kwargs):
"""Register a Scf task that perform a SCF run first with nsppol = 2 and then nspinor = 2"""
kwargs["task_class"] = CollinearThenNonCollinearScfTask
return self.register_task(*args, **kwargs)
def register_nscf_task(self, *args, **kwargs):
"""Register a nscf task."""
kwargs["task_class"] = NscfTask
return self.register_task(*args, **kwargs)
def register_relax_task(self, *args, **kwargs):
"""Register a task for structural optimization."""
kwargs["task_class"] = RelaxTask
return self.register_task(*args, **kwargs)
def register_phonon_task(self, *args, **kwargs):
"""Register a phonon task."""
kwargs["task_class"] = PhononTask
return self.register_task(*args, **kwargs)
def register_elastic_task(self, *args, **kwargs):
"""Register an elastic task."""
kwargs["task_class"] = ElasticTask
return self.register_task(*args, **kwargs)
def register_ddk_task(self, *args, **kwargs):
"""Register a ddk task."""
kwargs["task_class"] = DdkTask
return self.register_task(*args, **kwargs)
def register_scr_task(self, *args, **kwargs):
"""Register a screening task."""
kwargs["task_class"] = ScrTask
return self.register_task(*args, **kwargs)
def register_sigma_task(self, *args, **kwargs):
"""Register a sigma task."""
kwargs["task_class"] = SigmaTask
return self.register_task(*args, **kwargs)
def register_dde_task(self, *args, **kwargs):
"""Register a Dde task."""
kwargs["task_class"] = DdeTask
return self.register_task(*args, **kwargs)
def register_dte_task(self, *args, **kwargs):
"""Register a Dte task."""
kwargs["task_class"] = DteTask
return self.register_task(*args, **kwargs)
def register_bec_task(self, *args, **kwargs):
"""Register a BEC task."""
kwargs["task_class"] = BecTask
return self.register_task(*args, **kwargs)
def register_bse_task(self, *args, **kwargs):
"""Register a Bethe-Salpeter task."""
kwargs["task_class"] = BseTask
return self.register_task(*args, **kwargs)
def register_eph_task(self, *args, **kwargs):
"""Register an electron-phonon task."""
kwargs["task_class"] = EphTask
return self.register_task(*args, **kwargs)
def walknset_vars(self, task_class=None, *args, **kwargs):
"""
Set the values of the ABINIT variables in the input files of the nodes
Args:
task_class: If not None, only the input files of the tasks belonging
to class `task_class` are modified.
Example:
flow.walknset_vars(ecut=10, kptopt=4)
"""
def change_task(task):
if task_class is not None and task.__class__ is not task_class: return False
return True
if self.is_work:
for task in self:
if not change_task(task): continue
task.set_vars(*args, **kwargs)
elif self.is_flow:
for task in self.iflat_tasks():
if not change_task(task): continue
task.set_vars(*args, **kwargs)
else:
raise TypeError("Don't know how to set variables for object class %s" % self.__class__.__name__)
class Work(BaseWork, NodeContainer):
"""
A Work is a list of (possibly connected) tasks.
"""
def __init__(self, workdir=None, manager=None):
"""
Args:
workdir: Path to the working directory.
manager: :class:`TaskManager` object.
"""
super().__init__()
self._tasks = []
if workdir is not None:
self.set_workdir(workdir)
if manager is not None:
self.set_manager(manager)
def set_manager(self, manager):
"""Set the :class:`TaskManager` to use to launch the :class:`Task`."""
self.manager = manager.deepcopy()
for task in self:
task.set_manager(manager)
@property
def flow(self):
"""The flow containing this :class:`Work`."""
return self._flow
def set_flow(self, flow):
"""Set the flow associated to this :class:`Work`."""
if not hasattr(self, "_flow"):
self._flow = flow
else:
if self._flow != flow:
raise ValueError("self._flow != flow")
@lazy_property
def pos(self):
"""The position of self in the :class:`Flow`"""
for i, work in enumerate(self.flow):
if self == work:
return i
raise ValueError("Cannot find the position of %s in flow %s" % (self, self.flow))
@property
def pos_str(self):
"""String representation of self.pos"""
return "w" + str(self.pos)
def set_workdir(self, workdir, chroot=False):
"""Set the working directory. Cannot be set more than once unless chroot is True"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
self.workdir = os.path.abspath(workdir)
# Directories with (input|output|temporary) data.
# The work will use these directories to connect
# itself to other works and/or to produce new data
# that will be used by its children.
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
self.wdir = Directory(self.workdir)
def chroot(self, new_workdir):
self.set_workdir(new_workdir, chroot=True)
for i, task in enumerate(self):
new_tdir = os.path.join(self.workdir, "t" + str(i))
task.set_workdir(new_tdir, chroot=True)
def __len__(self):
return len(self._tasks)
def __iter__(self):
return self._tasks.__iter__()
def __getitem__(self, slice):
return self._tasks[slice]
def chunks(self, chunk_size):
"""Yield successive chunks of tasks of lenght chunk_size."""
for tasks in chunks(self, chunk_size):
yield tasks
    def ipath_from_ext(self, ext):
        """
        Returns the path of the input file with extension ext.
Use it when the file does not exist yet.
"""
return self.indir.path_in("in_" + ext)
def opath_from_ext(self, ext):
"""
Returns the path of the output file with extension ext.
Use it when the file does not exist yet.
"""
return self.outdir.path_in("out_" + ext)
@property
def processes(self):
return [task.process for task in self]
@property
def all_done(self):
"""True if all the :class:`Task` objects in the :class:`Work` are done."""
return all(task.status >= task.S_DONE for task in self)
@property
def isnc(self):
"""True if norm-conserving calculation."""
return all(task.isnc for task in self)
@property
def ispaw(self):
"""True if PAW calculation."""
return all(task.ispaw for task in self)
@property
def status_counter(self):
"""
Returns a `Counter` object that counts the number of task with
given status (use the string representation of the status as key).
"""
counter = collections.Counter()
for task in self:
counter[str(task.status)] += 1
return counter
def allocate(self, manager=None):
"""
This function is called once we have completed the initialization
of the :class:`Work`. It sets the manager of each task (if not already done)
and defines the working directories of the tasks.
Args:
manager: :class:`TaskManager` object or None
"""
for i, task in enumerate(self):
if not hasattr(task, "manager"):
# Set the manager
# Use the one provided in input else the one of the work/flow.
if manager is not None:
task.set_manager(manager)
else:
# Look first in work and then in the flow.
if hasattr(self, "manager"):
task.set_manager(self.manager)
else:
task.set_manager(self.flow.manager)
task_workdir = os.path.join(self.workdir, "t" + str(i))
if not hasattr(task, "workdir"):
task.set_workdir(task_workdir)
else:
if task.workdir != task_workdir:
raise ValueError("task.workdir != task_workdir: %s, %s" % (task.workdir, task_workdir))
def register(self, obj, deps=None, required_files=None, manager=None, task_class=None):
"""
Registers a new :class:`Task` and add it to the internal list, taking into account possible dependencies.
Args:
obj: :class:`AbinitInput` instance or `Task` object.
deps: Dictionary specifying the dependency of this node or list of dependencies
None means that this obj has no dependency.
required_files: List of strings with the path of the files used by the task.
Note that the files must exist when the task is registered.
Use the standard approach based on Works, Tasks and deps
if the files will be produced in the future.
manager:
The :class:`TaskManager` responsible for the submission of the task. If manager is None, we use
the `TaskManager` specified during the creation of the :class:`Work`.
task_class: Task subclass to instantiate. Default: :class:`AbinitTask`
Returns:
:class:`Task` object
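        Example:
            A minimal sketch (assuming ``scf_task`` is an already registered task and
            ``nscf_input`` is an :class:`AbinitInput` built elsewhere):
                nscf_task = work.register(nscf_input, deps={scf_task: "DEN"}, task_class=NscfTask)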
"""
task_workdir = None
if hasattr(self, "workdir"):
task_workdir = os.path.join(self.workdir, "t" + str(len(self)))
if isinstance(obj, Task):
task = obj
else:
# Set the class
if task_class is None:
task_class = AbinitTask
task = task_class.from_input(obj, task_workdir, manager)
self._tasks.append(task)
# Handle possible dependencies given either as dict or list.
if deps is not None:
if hasattr(deps, "items"):
deps = [Dependency(node, exts) for node, exts in deps.items()]
task.add_deps(deps)
# Handle possible dependencies.
if required_files is not None:
task.add_required_files(required_files)
return task
# Needed by NodeContainer
register_task = register
def path_in_workdir(self, filename):
"""Create the absolute path of filename in the working directory."""
return os.path.join(self.workdir, filename)
def setup(self, *args, **kwargs):
"""
Method called before running the calculations.
The default implementation is empty.
"""
def build(self, *args, **kwargs):
"""Creates the top level directory."""
# Create the directories of the work.
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Build dirs and files of each task.
for task in self:
task.build(*args, **kwargs)
# Connect signals within the work.
self.connect_signals()
@property
def status(self):
"""
Returns the status of the work i.e. the minimum of the status of the tasks.
"""
return self.get_all_status(only_min=True)
def get_all_status(self, only_min=False):
"""
Returns a list with the status of the tasks in self.
Args:
only_min: If True, the minimum of the status is returned.
"""
if len(self) == 0:
# The work will be created in the future.
if only_min:
return self.S_INIT
else:
return [self.S_INIT]
self.check_status()
status_list = [task.status for task in self]
if only_min:
return min(status_list)
else:
return status_list
def check_status(self):
"""Check the status of the tasks."""
# Recompute the status of the tasks
# Ignore OK and LOCKED tasks.
for task in self:
if task.status in (task.S_OK, task.S_LOCKED): continue
task.check_status()
# Take into account possible dependencies. Use a list instead of generators
for task in self:
if task.status == task.S_LOCKED: continue
if task.status < task.S_SUB and all(status == task.S_OK for status in task.deps_status):
task.set_status(task.S_READY, "Status set to Ready")
def rmtree(self, exclude_wildcard=""):
"""
Remove all files and directories in the working directory
Args:
exclude_wildcard: Optional string with regular expressions separated by `|`.
Files matching one of the regular expressions will be preserved.
                example: exclude_wildcard="*.nc|*.txt" preserves all the files
whose extension is in ["nc", "txt"].
"""
if not exclude_wildcard:
shutil.rmtree(self.workdir)
else:
w = WildCard(exclude_wildcard)
for dirpath, dirnames, filenames in os.walk(self.workdir):
for fname in filenames:
path = os.path.join(dirpath, fname)
if not w.match(fname):
os.remove(path)
def rm_indatadir(self):
"""Remove all the indata directories."""
for task in self:
task.rm_indatadir()
def rm_outdatadir(self):
"""Remove all the indata directories."""
for task in self:
            task.rm_outdatadir()
def rm_tmpdatadir(self):
"""Remove all the tmpdata directories."""
for task in self:
task.rm_tmpdatadir()
def move(self, dest, isabspath=False):
"""
Recursively move self.workdir to another location. This is similar to the Unix "mv" command.
The destination path must not already exist. If the destination already exists
but is not a directory, it may be overwritten depending on os.rename() semantics.
        By default, dest is located in the parent directory of self.workdir; use isabspath=True
to specify an absolute path.
"""
if not isabspath:
dest = os.path.join(os.path.dirname(self.workdir), dest)
shutil.move(self.workdir, dest)
def submit_tasks(self, wait=False):
"""
        Submits the tasks in self and, if wait is True, waits for their completion.
TODO: change name.
"""
for task in self:
task.start()
if wait:
for task in self: task.wait()
def start(self, *args, **kwargs):
"""
Start the work. Calls build and _setup first, then submit the tasks.
Non-blocking call unless wait is set to True
"""
wait = kwargs.pop("wait", False)
# Initial setup
self._setup(*args, **kwargs)
# Build dirs and files.
self.build(*args, **kwargs)
# Submit tasks (does not block)
self.submit_tasks(wait=wait)
def read_etotals(self, unit="Ha"):
"""
        Reads the total energy from the GSR file produced by each task.
        Return an :class:`EnergyArray` with the total energies converted to the given unit (default: Ha).
        The array element is set to np.inf if an exception is raised while reading the GSR file.
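        Example:
            etotals = work.read_etotals(unit="eV")  # requires all tasks to be completed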
"""
if not self.all_done:
raise self.Error("Some task is still in running/submitted state")
etotals = []
for task in self:
# Open the GSR file and read etotal (Hartree)
gsr_path = task.outdir.has_abiext("GSR")
etot = np.inf
if gsr_path:
with ETSF_Reader(gsr_path) as r:
etot = r.read_value("etotal")
etotals.append(etot)
return EnergyArray(etotals, "Ha").to(unit)
def parse_timers(self):
"""
Parse the TIMER section reported in the ABINIT output files.
Returns:
:class:`AbinitTimerParser` object
"""
filenames = list(filter(os.path.exists, [task.output_file.path for task in self]))
parser = AbinitTimerParser()
parser.parse(filenames)
return parser
class BandStructureWork(Work):
"""Work for band structure calculations."""
def __init__(self, scf_input, nscf_input, dos_inputs=None, workdir=None, manager=None):
"""
Args:
scf_input: Input for the SCF run
nscf_input: Input for the NSCF run defining the band structure calculation.
dos_inputs: Input(s) for the DOS. DOS is computed only if dos_inputs is not None.
workdir: Working directory.
manager: :class:`TaskManager` object.
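        Example:
            A minimal sketch (``scf_input``, ``nscf_input`` and the optional ``dos_input``
            are assumed to be :class:`AbinitInput` objects prepared elsewhere):
                work = BandStructureWork(scf_input, nscf_input, dos_inputs=[dos_input])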
"""
super().__init__(workdir=workdir, manager=manager)
# Register the GS-SCF run.
self.scf_task = self.register_scf_task(scf_input)
# Register the NSCF run and its dependency.
self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
# Add DOS computation(s) if requested.
self.dos_tasks = []
if dos_inputs is not None:
if not isinstance(dos_inputs, (list, tuple)):
dos_inputs = [dos_inputs]
for dos_input in dos_inputs:
dos_task = self.register_nscf_task(dos_input, deps={self.scf_task: "DEN"})
self.dos_tasks.append(dos_task)
def plot_ebands(self, **kwargs):
"""
Plot the band structure. kwargs are passed to the plot method of :class:`ElectronBands`.
Returns:
`matplotlib` figure
"""
with self.nscf_task.open_gsr() as gsr:
return gsr.ebands.plot(**kwargs)
def plot_ebands_with_edos(self, dos_pos=0, method="gaussian", step=0.01, width=0.1, **kwargs):
"""
Plot the band structure and the DOS.
Args:
dos_pos: Index of the task from which the DOS should be obtained (note: 0 refers to the first DOS task).
method: String defining the method for the computation of the DOS.
step: Energy step (eV) of the linear mesh.
width: Standard deviation (eV) of the gaussian.
kwargs: Keyword arguments passed to `plot_with_edos` method to customize the plot.
Returns:
`matplotlib` figure.
"""
with self.nscf_task.open_gsr() as gsr:
gs_ebands = gsr.ebands
with self.dos_tasks[dos_pos].open_gsr() as gsr:
dos_ebands = gsr.ebands
edos = dos_ebands.get_edos(method=method, step=step, width=width)
return gs_ebands.plot_with_edos(edos, **kwargs)
def plot_edoses(self, dos_pos=None, method="gaussian", step=0.01, width=0.1, **kwargs):
"""
Plot the band structure and the DOS.
Args:
dos_pos: Index of the task from which the DOS should be obtained.
                None if all DOSes should be displayed. Accepts an integer or a list of integers.
method: String defining the method for the computation of the DOS.
step: Energy step (eV) of the linear mesh.
width: Standard deviation (eV) of the gaussian.
kwargs: Keyword arguments passed to `plot` method to customize the plot.
Returns:
`matplotlib` figure.
"""
if dos_pos is not None and not isinstance(dos_pos, (list, tuple)): dos_pos = [dos_pos]
from abipy.electrons.ebands import ElectronDosPlotter
plotter = ElectronDosPlotter()
for i, task in enumerate(self.dos_tasks):
if dos_pos is not None and i not in dos_pos: continue
with task.open_gsr() as gsr:
edos = gsr.ebands.get_edos(method=method, step=step, width=width)
ngkpt = task.get_inpvar("ngkpt")
plotter.add_edos("ngkpt %s" % str(ngkpt), edos)
return plotter.combiplot(**kwargs)
class RelaxWork(Work):
"""
Work for structural relaxations. The first task relaxes the atomic position
while keeping the unit cell parameters fixed. The second task uses the final
structure to perform a structural relaxation in which both the atomic positions
and the lattice parameters are optimized.
"""
def __init__(self, ion_input, ioncell_input, workdir=None, manager=None, target_dilatmx=None):
"""
Args:
ion_input: Input for the relaxation of the ions (cell is fixed)
ioncell_input: Input for the relaxation of the ions and the unit cell.
workdir: Working directory.
            manager: :class:`TaskManager` object.
            target_dilatmx: Target value of dilatmx. If given, the ion-cell relaxation is restarted
                with progressively smaller dilatmx until this target value is reached.
"""
super().__init__(workdir=workdir, manager=manager)
self.ion_task = self.register_relax_task(ion_input)
# Note:
# 1) It would be nice to restart from the WFK file but ABINIT crashes due to the
# different unit cell parameters if paral_kgb == 1
#paral_kgb = ion_input[0]["paral_kgb"]
#if paral_kgb == 1:
#deps = {self.ion_task: "WFK"} # --> FIXME: Problem in rwwf
#deps = {self.ion_task: "DEN"}
deps = None
self.ioncell_task = self.register_relax_task(ioncell_input, deps=deps)
# Lock ioncell_task as ion_task should communicate to ioncell_task that
# the calculation is OK and pass the final structure.
self.ioncell_task.lock(source_node=self)
self.transfer_done = False
self.target_dilatmx = target_dilatmx
#@check_spectator
def on_ok(self, sender):
"""
This callback is called when one task reaches status S_OK.
If sender == self.ion_task, we update the initial structure
used by self.ioncell_task and we unlock it so that the job can be submitted.
"""
logger.debug("in on_ok with sender %s" % sender)
if sender == self.ion_task and not self.transfer_done:
# Get the relaxed structure from ion_task
ion_structure = self.ion_task.get_final_structure()
# Transfer it to the ioncell task (we do it only once).
self.ioncell_task._change_structure(ion_structure)
self.transfer_done = True
# Unlock ioncell_task so that we can submit it.
self.ioncell_task.unlock(source_node=self)
elif sender == self.ioncell_task and self.target_dilatmx:
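            # The ion-cell relaxation finished with a dilatmx above the target value:
            # reduce dilatmx, reset the task and restart it until target_dilatmx is reached.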
actual_dilatmx = self.ioncell_task.get_inpvar('dilatmx', 1.)
if self.target_dilatmx < actual_dilatmx:
self.ioncell_task.reduce_dilatmx(target=self.target_dilatmx)
self.history.info('Converging dilatmx. Value reduce from {} to {}.'
.format(actual_dilatmx, self.ioncell_task.get_inpvar('dilatmx')))
self.ioncell_task.reset_from_scratch()
return super().on_ok(sender)
def plot_ion_relaxation(self, **kwargs):
"""
        Plot the history of the ion relaxation (atomic positions with the unit cell kept fixed).
kwargs are passed to the plot method of :class:`HistFile`
Return `matplotlib` figure or None if hist file is not found.
"""
with self.ion_task.open_hist() as hist:
return hist.plot(**kwargs) if hist else None
def plot_ioncell_relaxation(self, **kwargs):
"""
Plot the history of the ion-cell relaxation.
kwargs are passed to the plot method of :class:`HistFile`
Return `matplotlib` figure or None if hist file is not found.
"""
with self.ioncell_task.open_hist() as hist:
return hist.plot(**kwargs) if hist else None
class G0W0Work(Work):
"""
Work for general G0W0 calculations.
All input can be either single inputs or lists of inputs
"""
def __init__(self, scf_inputs, nscf_inputs, scr_inputs, sigma_inputs,
workdir=None, manager=None):
"""
Args:
            scf_inputs: Input(s) for the SCF run. If a list is given, all inputs are registered
                but only the last one is linked to the NSCF run (used for convergence studies
                on the KS band gap).
            nscf_inputs: Input(s) for the NSCF run. If a list is given, all inputs are registered
                but only the last one is linked to the SCR/SIGMA runs (e.g. additional DOS and bands).
            scr_inputs: Input(s) for the screening run.
            sigma_inputs: List of :class:`AbinitInput` for the self-energy run.
                If scr_inputs and sigma_inputs are lists of the same length, every sigma gets its own screening;
                if there is only one screening, all sigma inputs are linked to it.
workdir: Working directory of the calculation.
manager: :class:`TaskManager` object.
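        Example:
            A minimal sketch with a single screening shared by two sigma inputs
            (all inputs are assumed to be :class:`AbinitInput` objects prepared elsewhere):
                work = G0W0Work(scf_input, nscf_input, scr_input, [sigma_input1, sigma_input2])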
"""
super().__init__(workdir=workdir, manager=manager)
spread_scr = (isinstance(sigma_inputs, (list, tuple)) and
isinstance(scr_inputs, (list, tuple)) and
len(sigma_inputs) == len(scr_inputs))
#print("spread_scr", spread_scr)
self.sigma_tasks = []
# Register the GS-SCF run.
        # register all scf_inputs but link the NSCF task only to the last SCF task in the list
# multiple scf_inputs can be provided to perform convergence studies
if isinstance(scf_inputs, (list, tuple)):
for scf_input in scf_inputs:
self.scf_task = self.register_scf_task(scf_input)
else:
self.scf_task = self.register_scf_task(scf_inputs)
# Register the NSCF run (s).
if isinstance(nscf_inputs, (list, tuple)):
for nscf_input in nscf_inputs:
self.nscf_task = nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
else:
self.nscf_task = nscf_task = self.register_nscf_task(nscf_inputs, deps={self.scf_task: "DEN"})
# Register the SCR and SIGMA run(s).
if spread_scr:
for scr_input, sigma_input in zip(scr_inputs, sigma_inputs):
scr_task = self.register_scr_task(scr_input, deps={nscf_task: "WFK"})
sigma_task = self.register_sigma_task(sigma_input, deps={nscf_task: "WFK", scr_task: "SCR"})
self.sigma_tasks.append(sigma_task)
else:
# Sigma work(s) connected to the same screening.
scr_task = self.register_scr_task(scr_inputs, deps={nscf_task: "WFK"})
if isinstance(sigma_inputs, (list, tuple)):
for inp in sigma_inputs:
task = self.register_sigma_task(inp, deps={nscf_task: "WFK", scr_task: "SCR"})
self.sigma_tasks.append(task)
else:
task = self.register_sigma_task(sigma_inputs, deps={nscf_task: "WFK", scr_task: "SCR"})
self.sigma_tasks.append(task)
class SigmaConvWork(Work):
"""
Work for self-energy convergence studies.
"""
def __init__(self, wfk_node, scr_node, sigma_inputs, workdir=None, manager=None):
"""
Args:
wfk_node: The node who has produced the WFK file or filepath pointing to the WFK file.
scr_node: The node who has produced the SCR file or filepath pointing to the SCR file.
sigma_inputs: List of :class:`AbinitInput` for the self-energy runs.
workdir: Working directory of the calculation.
manager: :class:`TaskManager` object.
"""
# Cast to node instances.
wfk_node, scr_node = Node.as_node(wfk_node), Node.as_node(scr_node)
super().__init__(workdir=workdir, manager=manager)
# Register the SIGMA runs.
if not isinstance(sigma_inputs, (list, tuple)):
sigma_inputs = [sigma_inputs]
for sigma_input in sigma_inputs:
self.register_sigma_task(sigma_input, deps={wfk_node: "WFK", scr_node: "SCR"})
class BseMdfWork(Work):
"""
Work for simple BSE calculations in which the self-energy corrections
are approximated by the scissors operator and the screening is modeled
with the model dielectric function.
"""
def __init__(self, scf_input, nscf_input, bse_inputs, workdir=None, manager=None):
"""
Args:
scf_input: Input for the SCF run.
nscf_input: Input for the NSCF run.
bse_inputs: List of Inputs for the BSE run.
workdir: Working directory of the calculation.
manager: :class:`TaskManager`.
"""
super().__init__(workdir=workdir, manager=manager)
# Register the GS-SCF run.
self.scf_task = self.register_scf_task(scf_input)
# Construct the input for the NSCF run.
self.nscf_task = self.register_nscf_task(nscf_input, deps={self.scf_task: "DEN"})
# Construct the input(s) for the BSE run.
if not isinstance(bse_inputs, (list, tuple)):
bse_inputs = [bse_inputs]
for bse_input in bse_inputs:
self.register_bse_task(bse_input, deps={self.nscf_task: "WFK"})
def get_mdf_robot(self):
"""Builds and returns a :class:`MdfRobot` for analyzing the results in the MDF files."""
from abilab.robots import MdfRobot
robot = MdfRobot()
for task in self[2:]:
mdf_path = task.outdir.has_abiext(robot.EXT)
if mdf_path:
robot.add_file(str(task), mdf_path)
return robot
class QptdmWork(Work):
"""
This work parallelizes the calculation of the q-points of the screening.
It also provides the callback `on_all_ok` that calls mrgscr to merge
all the partial screening files produced.
"""
def create_tasks(self, wfk_file, scr_input):
"""
Create the SCR tasks and register them in self.
Args:
wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
scr_input: Input for the screening calculation.
"""
assert len(self) == 0
wfk_file = self.wfk_file = os.path.abspath(wfk_file)
# Build a temporary work in the tmpdir that will use a shell manager
# to run ABINIT in order to get the list of q-points for the screening.
shell_manager = self.manager.to_shell_manager(mpi_procs=1)
w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager)
fake_input = scr_input.deepcopy()
fake_task = w.register(fake_input)
w.allocate()
w.build()
# Create the symbolic link and add the magic value
# nqpdm = -1 to the input to get the list of q-points.
fake_task.inlink_file(wfk_file)
fake_task.set_vars({"nqptdm": -1})
fake_task.start_and_wait()
# Parse the section with the q-points
with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader:
qpoints = reader.read_value("reduced_coordinates_of_kpoints")
#print("qpoints)
# Now we can register the task for the different q-points
for qpoint in qpoints:
qptdm_input = scr_input.deepcopy()
qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)
new_task = self.register_scr_task(qptdm_input, manager=self.manager)
# Add the garbage collector.
if self.flow.gc is not None:
new_task.set_gc(self.flow.gc)
self.allocate()
def merge_scrfiles(self, remove_scrfiles=True):
"""
This method is called when all the q-points have been computed.
It runs `mrgscr` in sequential on the local machine to produce
the final SCR file in the outdir of the `Work`.
If remove_scrfiles is True, the partial SCR files are removed after the merge.
"""
scr_files = list(filter(None, [task.outdir.has_abiext("SCR") for task in self]))
self.history.info("Will call mrgscr to merge %s SCR files:\n" % len(scr_files))
assert len(scr_files) == len(self)
mrgscr = wrappers.Mrgscr(manager=self[0].manager, verbose=1)
final_scr = mrgscr.merge_qpoints(self.outdir.path, scr_files, out_prefix="out")
if remove_scrfiles:
for scr_file in scr_files:
try:
os.remove(scr_file)
except IOError:
pass
return final_scr
#@check_spectator
def on_all_ok(self):
"""
This method is called when all the q-points have been computed.
It runs `mrgscr` in sequential on the local machine to produce
the final SCR file in the outdir of the `Work`.
"""
final_scr = self.merge_scrfiles()
return self.Results(node=self, returncode=0, message="mrgscr done", final_scr=final_scr)
# TODO: MergeDdb --> DfptWork(Work) postpone it because it may break pickle.
class MergeDdb:
"""Mixin class for Works that have to merge the DDB files produced by the tasks."""
def add_becs_from_scf_task(self, scf_task, ddk_tolerance, ph_tolerance):
"""
Build tasks for the computation of Born effective charges and add them to the work.
Args:
scf_task: ScfTask object.
ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run.
None to use AbiPy default.
ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run.
None to use AbiPy default.
Return:
(ddk_tasks, bec_tasks)
"""
if not isinstance(scf_task, ScfTask):
raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)
# DDK calculations (self-consistent to get electric field).
multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)
ddk_tasks = []
for ddk_inp in multi_ddk:
ddk_task = self.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
ddk_tasks.append(ddk_task)
# Build the list of inputs for electric field perturbation and phonons
# Each BEC task is connected to all the previous DDK task and to the scf_task.
bec_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
bec_deps.update({scf_task: "WFK"})
bec_inputs = scf_task.input.make_bec_inputs(tolerance=ph_tolerance)
bec_tasks = []
for bec_inp in bec_inputs:
bec_task = self.register_bec_task(bec_inp, deps=bec_deps)
bec_tasks.append(bec_task)
return ddk_tasks, bec_tasks
def merge_ddb_files(self, delete_source_ddbs=True, only_dfpt_tasks=True,
exclude_tasks=None, include_tasks=None):
"""
This method is called when all the q-points have been computed.
It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
Args:
delete_source_ddbs: True if input DDB should be removed once final DDB is created.
only_dfpt_tasks: False to merge all DDB files produced by the tasks of the work
Useful e.g. for finite stress corrections in which the stress in the
initial configuration should be merged in the final DDB.
exclude_tasks: List of tasks that should be excluded when merging the partial DDB files.
include_tasks: List of tasks that should be included when merging the partial DDB files.
Mutually exclusive with exclude_tasks.
Returns:
path to the output DDB file
"""
if exclude_tasks:
my_tasks = [task for task in self if task not in exclude_tasks]
elif include_tasks:
my_tasks = [task for task in self if task in include_tasks]
else:
my_tasks = [task for task in self]
if only_dfpt_tasks:
ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks \
if isinstance(task, DfptTask)]))
else:
ddb_files = list(filter(None, [task.outdir.has_abiext("DDB") for task in my_tasks]))
self.history.info("Will call mrgddb to merge %s DDB files:" % len(ddb_files))
        # DDB files are always produced so this should never happen!
if not ddb_files:
            raise RuntimeError("Cannot find any DDB file to merge by the tasks of %s" % self)
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
if len(ddb_files) == 1:
# Avoid the merge. Just copy the DDB file to the outdir of the work.
shutil.copy(ddb_files[0], out_ddb)
else:
# Call mrgddb
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self[0].manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc,
delete_source_ddbs=delete_source_ddbs)
return out_ddb
def merge_pot1_files(self, delete_source=True):
"""
This method is called when all the q-points have been computed.
It runs `mrgdvdb` in sequential on the local machine to produce
the final DVDB file in the outdir of the `Work`.
Args:
delete_source: True if POT1 files should be removed after (successful) merge.
Returns:
            path to the output DVDB file. None if no DFPT POT file is found.
"""
natom = len(self[0].input.structure)
max_pertcase = 3 * natom
pot1_files = []
for task in self:
if not isinstance(task, DfptTask): continue
paths = task.outdir.list_filepaths(wildcard="*_POT*")
for path in paths:
# Include only atomic perturbations i.e. files whose ext <= 3 * natom
i = path.rindex("_POT")
pertcase = int(path[i+4:].replace(".nc", ""))
if pertcase <= max_pertcase:
pot1_files.append(path)
# prtpot = 0 disables the output of the DFPT POT files so an empty list is not fatal here.
if not pot1_files: return None
self.history.info("Will call mrgdvdb to merge %s files:" % len(pot1_files))
        # Final DVDB file will be produced in the outdir of the work.
out_dvdb = self.outdir.path_in("out_DVDB")
if len(pot1_files) == 1:
            # Avoid the merge. Just copy the POT file to the outdir of the work.
shutil.copy(pot1_files[0], out_dvdb)
else:
# FIXME: The merge may require a non-negligible amount of memory if lots of qpts.
# Besides there are machines such as lemaitre3 that are problematic when
# running MPI applications on the front-end
mrgdvdb = wrappers.Mrgdvdb(manager=self[0].manager, verbose=0)
mrgdvdb.merge(self.outdir.path, pot1_files, out_dvdb, delete_source=delete_source)
return out_dvdb
class PhononWork(Work, MergeDdb):
"""
This work consists of nirred Phonon tasks where nirred is
the number of irreducible atomic perturbations for a given set of q-points.
It provides the callback method (on_all_ok) that calls mrgddb (mrgdv) to merge
all the partial DDB (POT) files produced. The two files are available in the
output directory of the Work.
"""
@classmethod
def from_scf_task(cls, scf_task, qpoints, is_ngqpt=False, tolerance=None, with_becs=False,
ddk_tolerance=None, manager=None):
"""
Construct a `PhononWork` from a :class:`ScfTask` object.
The input file for phonons is automatically generated from the input of the ScfTask.
Each phonon task depends on the WFK file produced by the `scf_task`.
Args:
scf_task: ScfTask object.
qpoints: q-points in reduced coordinates. Accepts single q-point, list of q-points
or three integers defining the q-mesh if `is_ngqpt`.
is_ngqpt: True if `qpoints` should be interpreted as divisions instead of q-points.
tolerance: dict {"varname": value} with the tolerance to be used in the phonon run.
None to use AbiPy default.
with_becs: Activate calculation of Electric field and Born effective charges.
ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run if with_becs.
None to use AbiPy default.
manager: :class:`TaskManager` object.
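        Example:
            A minimal sketch (``scf_task`` is assumed to be the ScfTask of a previous GS run):
                work = PhononWork.from_scf_task(scf_task, qpoints=[4, 4, 4], is_ngqpt=True, with_becs=True)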
"""
if not isinstance(scf_task, ScfTask):
raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)
if is_ngqpt:
qpoints = scf_task.input.abiget_ibz(ngkpt=qpoints, shiftk=[0, 0, 0], kptopt=1).points
qpoints = np.reshape(qpoints, (-1, 3))
new = cls(manager=manager)
if with_becs:
new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance=tolerance)
for qpt in qpoints:
if with_becs and np.sum(qpt ** 2) < 1e-12: continue
multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance)
for ph_inp in multi:
new.register_phonon_task(ph_inp, deps={scf_task: "WFK"})
return new
@classmethod
def from_scf_input(cls, scf_input, qpoints, is_ngqpt=False, tolerance=None,
with_becs=False, ddk_tolerance=None, manager=None):
"""
Similar to `from_scf_task`, the difference is that this method requires
an input for SCF calculation. A new ScfTask is created and added to the Work.
This API should be used if the DDB of the GS task should be merged.
"""
if is_ngqpt:
qpoints = scf_input.abiget_ibz(ngkpt=qpoints, shiftk=[0, 0, 0], kptopt=1).points
qpoints = np.reshape(qpoints, (-1, 3))
new = cls(manager=manager)
# Create ScfTask
scf_task = new.register_scf_task(scf_input)
if with_becs:
new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance=tolerance)
for qpt in qpoints:
if with_becs and np.sum(qpt ** 2) < 1e-12: continue
multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=tolerance)
for ph_inp in multi:
new.register_phonon_task(ph_inp, deps={scf_task: "WFK"})
return new
#@check_spectator
def on_all_ok(self):
"""
This method is called when all the q-points have been computed.
        It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
"""
# Merge DDB files.
out_ddb = self.merge_ddb_files()
# Merge DVDB files.
out_dvdb = self.merge_pot1_files()
return self.Results(node=self, returncode=0, message="DDB merge done")
class PhononWfkqWork(Work, MergeDdb):
"""
This work computes phonons with DFPT on an arbitrary q-mesh (usually denser than the k-mesh for electrons)
by computing WKQ files for each q-point.
The number of irreducible atomic perturbations for each q-point are taken into account.
It provides the callback method (on_all_ok) that calls mrgddb (mrgdv) to merge
all the partial DDB (POT) files produced. The two files are available in the
output directory of the Work. The WKQ files are removed at runtime.
"""
@classmethod
def from_scf_task(cls, scf_task, ngqpt, ph_tolerance=None, tolwfr=1.0e-22, nband=None,
with_becs=False, ddk_tolerance=None, shiftq=(0, 0, 0), is_ngqpt=True, remove_wfkq=True,
manager=None):
"""
Construct a `PhononWfkqWork` from a :class:`ScfTask` object.
The input files for WFQ and phonons are automatically generated from the input of the ScfTask.
Each phonon task depends on the WFK file produced by scf_task and the associated WFQ file.
Args:
scf_task: ScfTask object.
ngqpt: three integers defining the q-mesh
with_becs: Activate calculation of Electric field and Born effective charges.
ph_tolerance: dict {"varname": value} with the tolerance for the phonon run.
None to use AbiPy default.
tolwfr: tolerance used to compute WFQ.
ddk_tolerance: dict {"varname": value} with the tolerance used in the DDK run if with_becs.
None to use AbiPy default.
shiftq: Q-mesh shift. Multiple shifts are not supported.
            is_ngqpt: If True, ngqpt is interpreted as three integers defining the q-mesh,
                otherwise as an explicit list of q-points.
remove_wfkq: Remove WKQ files when the children are completed.
manager: :class:`TaskManager` object.
.. note:
            Use k-meshes with one shift and q-meshes whose divisions divide ngkpt
            (i.e. ngkpt is a multiple of ngqpt) to decrease the number of WFQ files to be computed.
"""
if not isinstance(scf_task, ScfTask):
raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)
shiftq = np.reshape(shiftq, (3,))
if is_ngqpt:
qpoints = scf_task.input.abiget_ibz(ngkpt=ngqpt, shiftk=shiftq, kptopt=1).points
else:
qpoints = ngqpt
new = cls(manager=manager)
new.remove_wfkq = remove_wfkq
new.wfkq_tasks = []
new.wfkq_task_children = collections.defaultdict(list)
if with_becs:
# Add DDK and BECS.
new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance)
# Get ngkpt, shift for electrons from input.
# Won't try to skip WFQ if multiple shifts or off-diagonal kptrlatt
ngkpt, shiftk = scf_task.input.get_ngkpt_shiftk()
        try_to_skip_wfkq = True
        if ngkpt is None or len(shiftk) > 1 or not is_ngqpt:
            try_to_skip_wfkq = False
# TODO: One could avoid kptopt 3 by computing WFK in the IBZ and then rotating.
# but this has to be done inside Abinit.
for qpt in qpoints:
is_gamma = np.sum(qpt ** 2) < 1e-12
if with_becs and is_gamma: continue
# Avoid WFQ if k + q = k (requires ngkpt, multiple shifts are not supported)
need_wfkq = True
if is_gamma:
need_wfkq = False
elif try_to_skip_wfkq:
# k = (i + shiftk) / ngkpt
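                # k + q is still on the k-mesh iff qinds * ngkpt is divisible by ngqpt
                # (all the remainders below are zero); in that case the WFK file already
                # contains the needed wavefunctions and the WFQ calculation can be skipped.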
qinds = np.rint(qpt * ngqpt - shiftq)
f = (qinds * ngkpt) % ngqpt
need_wfkq = np.any(f != 0)
if need_wfkq:
nscf_inp = scf_task.input.new_with_vars(qpt=qpt, nqpt=1, iscf=-2, kptopt=3, tolwfr=tolwfr)
if nband:
                    nbdbuf = max(2, int(nband * 0.1))
nscf_inp.set_vars(nband=nband+nbdbuf, nbdbuf=nbdbuf)
wfkq_task = new.register_nscf_task(nscf_inp, deps={scf_task: ["DEN", "WFK"]})
new.wfkq_tasks.append(wfkq_task)
multi = scf_task.input.make_ph_inputs_qpoint(qpt, tolerance=ph_tolerance)
for ph_inp in multi:
deps = {scf_task: "WFK", wfkq_task: "WFQ"} if need_wfkq else {scf_task: "WFK"}
#ph_inp["prtwf"] = -1
t = new.register_phonon_task(ph_inp, deps=deps)
if need_wfkq:
new.wfkq_task_children[wfkq_task].append(t)
return new
def on_ok(self, sender):
"""
This callback is called when one task reaches status `S_OK`.
It removes the WFKQ file if all its children have reached `S_OK`.
"""
if self.remove_wfkq:
for task in self.wfkq_tasks:
if task.status != task.S_OK: continue
children = self.wfkq_task_children[task]
if all(child.status == child.S_OK for child in children):
path = task.outdir.has_abiext("WFQ")
if path:
self.history.info("Removing WFQ: %s" % path)
os.remove(path)
return super().on_ok(sender)
#@check_spectator
def on_all_ok(self):
"""
This method is called when all the q-points have been computed.
        It runs `mrgddb` in sequential on the local machine to produce
the final DDB file in the outdir of the `Work`.
"""
# Merge DDB files.
out_ddb = self.merge_ddb_files()
# Merge DVDB files.
out_dvdb = self.merge_pot1_files()
return self.Results(node=self, returncode=0, message="DDB merge done")
class GKKPWork(Work):
"""
This work computes electron-phonon matrix elements for all the q-points
present in a DVDB and DDB file
"""
@classmethod
def from_den_ddb_dvdb(cls, inp, den_path, ddb_path, dvdb_path, mpiprocs=1, remove_wfkq=True,
qpath=None, with_ddk=True, expand=True, manager=None):
"""
Construct a `PhononWfkqWork` from a DDB and DVDB file.
For each q found, a WFQ task and an EPH task computing the matrix elements are created.
"""
import abipy.abilab as abilab
# Create file nodes
den_file = FileNode(den_path)
ddb_file = FileNode(ddb_path)
dvdb_file = FileNode(dvdb_path)
# Create new work
new = cls(manager=manager)
new.remove_wfkq = remove_wfkq
new.wfkq_tasks = []
new.wfkq_task_children = collections.defaultdict(list)
if manager is None: manager = TaskManager.from_user_config()
tm = manager.new_with_fixed_mpi_omp(mpiprocs, 1)
# Create a WFK task
kptopt = 1 if expand else 3
nscf_inp = inp.new_with_vars(iscf=-2, kptopt=kptopt)
wfk_task = new.register_nscf_task(nscf_inp, deps={den_file: "DEN"},manager=tm)
new.wfkq_tasks.append(wfk_task)
new.wfk_task = wfk_task
# Read path and regular grid from DDB file
with abilab.abiopen(ddb_path) as ddb:
q_frac_coords = np.array([k.frac_coords for k in ddb.qpoints])
ddb_ngqpt = ddb.guessed_ngqpt
# If qpath is set, we read the list of q-points to be used to interpolate the DVDB file.
# The DVDB and DDB file have to correspond to a regular grid.
dvdb = dvdb_file
if qpath is None:
qpath = q_frac_coords
else:
interp_inp = inp.new_with_vars(optdriver=7, eph_task=-5, ddb_ngqpt=ddb_ngqpt,
ph_nqpath=len(qpath), ph_qpath=qpath, prtphdos=0)
dvdb = new.register_eph_task(interp_inp, deps={wfk_task: "WFK", ddb_file: "DDB", dvdb_file: "DVDB"},
manager=tm)
# Create a WFK expansion task
if expand:
fbz_nscf_inp = inp.new_with_vars(optdriver=8)
fbz_nscf_inp.set_spell_check(False)
fbz_nscf_inp.set_vars(wfk_task="wfk_fullbz")
tm_serial = manager.new_with_fixed_mpi_omp(1,1)
wfk_task = new.register_nscf_task(fbz_nscf_inp, deps={wfk_task: "WFK", den_file: "DEN"},
manager=tm_serial)
new.wfkq_tasks.append(wfk_task)
new.wfk_task = wfk_task
if with_ddk:
kptopt = 3 if expand else 1
ddk_inp = inp.new_with_vars(optdriver=8,kptopt=kptopt)
ddk_inp.set_spell_check(False)
ddk_inp.set_vars(wfk_task="wfk_ddk")
ddk_task = new.register_nscf_task(ddk_inp, deps={wfk_task: "WFK", den_file: "DEN"}, manager=tm)
new.wfkq_tasks.append(ddk_task)
# For each qpoint
for qpt in qpath:
is_gamma = np.sum(qpt ** 2) < 1e-12
if is_gamma:
# Create a link from WFK to WFQ on_ok
wfkq_task = wfk_task
deps = {wfk_task: ["WFK","WFQ"], ddb_file: "DDB", dvdb: "DVDB" }
else:
# Create a WFQ task
nscf_inp = nscf_inp.new_with_vars(kptopt=3, qpt=qpt, nqpt=1)
wfkq_task = new.register_nscf_task(nscf_inp, deps={den_file: "DEN"}, manager=tm)
new.wfkq_tasks.append(wfkq_task)
deps = {wfk_task: "WFK", wfkq_task: "WFQ", ddb_file: "DDB", dvdb: "DVDB" }
# Create a EPH task
eph_inp = inp.new_with_vars(optdriver=7, prtphdos=0, eph_task=-2, kptopt=3,
ddb_ngqpt=[1,1,1], nqpt=1, qpt=qpt)
t = new.register_eph_task(eph_inp, deps=deps, manager=tm)
new.wfkq_task_children[wfkq_task].append(t)
return new
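# The sketch below illustrates how this constructor might be called; it is not
# part of the original module. The file paths, the `inp` AbinitInput object and
# the `flow` variable are illustrative assumptions.
#
#   work = GKKPWork.from_den_ddb_dvdb(inp, "out_DEN", "out_DDB", "out_DVDB",
#                                     mpiprocs=4, with_ddk=True, expand=True)
#   flow.register_work(work)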
@classmethod
def from_phononwfkq_work(cls, phononwfkq_work, nscf_vars={}, remove_wfkq=True, with_ddk=True, manager=None):
"""
Construct a `GKKPWork` from a `PhononWfkqWork` object.
The WFQ files are the ones used by the PhononWfkqWork, so in principle they contain only valence bands.
"""
# Get the list of q-points from the phonon tasks in this work
qpoints = []
qpoints_deps = []
for task in phononwfkq_work:
if isinstance(task, PhononTask):
# Store qpoints
qpt = task.input.get("qpt", [0,0,0])
qpoints.append(qpt)
# Store dependencies
qpoints_deps.append(task.deps)
# Create file nodes
ddb_path = phononwfkq_work.outdir.has_abiext("DDB")
dvdb_path = phononwfkq_work.outdir.has_abiext("DVDB")
ddb_file = FileNode(ddb_path)
dvdb_file = FileNode(dvdb_path)
# Get scf_task from first q-point
for dep in qpoints_deps[0]:
if isinstance(dep.node,ScfTask) and dep.exts[0] == 'WFK':
scf_task = dep.node
# Create new work
new = cls(manager=manager)
new.remove_wfkq = remove_wfkq
new.wfkq_tasks = []
new.wfk_task = []
# Add one eph task per qpoint
for qpt, qpoint_deps in zip(qpoints, qpoints_deps):
# Create eph task
eph_input = scf_task.input.new_with_vars(optdriver=7, prtphdos=0, eph_task=-2,
ddb_ngqpt=[1,1,1], nqpt=1, qpt=qpt)
deps = {ddb_file: "DDB", dvdb_file: "DVDB" }
for dep in qpoint_deps:
deps[dep.node] = dep.exts[0]
# If there is no WFQ in deps, link the WFK file with a WFQ extension
if 'WFQ' not in deps.values():
inv_deps = dict((v, k) for k, v in deps.items())
wfk_task = inv_deps['WFK']
wfk_path = wfk_task.outdir.has_abiext("WFK")
# Check if netcdf
filename, extension = os.path.splitext(wfk_path)
infile = 'out_WFQ' + extension
wfq_path = os.path.join(os.path.dirname(wfk_path), infile)
if not os.path.isfile(wfq_path): os.symlink(wfk_path, wfq_path)
deps[FileNode(wfq_path)] = 'WFQ'
new.register_eph_task(eph_input, deps=deps)
return new
def on_ok(self, sender):
"""
This callback is called when one task reaches status `S_OK`.
It removes the WFKQ file if all its children have reached `S_OK`.
"""
if self.remove_wfkq:
for task in self.wfkq_tasks:
if task.status != task.S_OK: continue
children = self.wfkq_task_children[task]
if all(child.status == child.S_OK for child in children):
path = task.outdir.has_abiext("WFQ")
if path:
self.history.info("Removing WFQ: %s" % path)
os.remove(path)
# If the sender is the WFK task, we create a link to a WFQ file so that Abinit is happy
if sender == self.wfk_task:
wfk_path = self.wfk_task.outdir.has_abiext("WFK")
# Check if netcdf
filename, extension = os.path.splitext(wfk_path)
infile = 'out_WFQ' + extension
infile = os.path.join(os.path.dirname(wfk_path), infile)
os.symlink(wfk_path, infile)
return super().on_ok(sender)
class BecWork(Work, MergeDdb):
"""
Work for the computation of the Born effective charges.
This work consists of DDK tasks and phonon + electric field perturbations.
It provides the callback method (on_all_ok) that calls mrgddb to merge the
partial DDB files produced by the work.
"""
@classmethod
def from_scf_task(cls, scf_task, ddk_tolerance=None, ph_tolerance=None, manager=None):
"""
Build tasks for the computation of Born effective charges from a ground-state task.
Args:
scf_task: ScfTask object.
ddk_tolerance: tolerance used in the DDK run. None to use AbiPy default.
ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run.
None to use AbiPy default.
manager: :class:`TaskManager` object.
"""
new = cls(manager=manager)
new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance)
return new
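# Illustrative sketch (not part of the original module): a BecWork is typically
# appended to an existing flow after its ground-state task. The `scf_task` and
# `flow` variables and the tolerance dict below are assumptions.
#
#   bec_work = BecWork.from_scf_task(scf_task, ddk_tolerance={"tolwfr": 1e-20})
#   flow.register_work(bec_work)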
def on_all_ok(self):
"""
This method is called when all tasks reach S_OK.
It runs `mrgddb` sequentially on the local machine to produce
the final DDB file in the outdir of the `Work`.
"""
# Merge DDB files.
out_ddb = self.merge_ddb_files()
return self.Results(node=self, returncode=0, message="DDB merge done")
class DteWork(Work, MergeDdb):
"""
Work for the computation of the third derivative of the energy.
This work consists of DDK tasks and electric field perturbation.
It provides the callback method (on_all_ok) that calls mrgddb to merge the partial DDB files produced
"""
@classmethod
def from_scf_task(cls, scf_task, ddk_tolerance=None, manager=None):
"""
Build a DteWork from a ground-state task.
Args:
scf_task: ScfTask object.
ddk_tolerance: tolerance used in the DDK run. None to use AbiPy default.
manager: :class:`TaskManager` object.
"""
if not isinstance(scf_task, ScfTask):
raise TypeError("task `%s` does not inherit from ScfTask" % scf_task)
new = cls(manager=manager)
# DDK calculations
multi_ddk = scf_task.input.make_ddk_inputs(tolerance=ddk_tolerance)
ddk_tasks = []
for ddk_inp in multi_ddk:
ddk_task = new.register_ddk_task(ddk_inp, deps={scf_task: "WFK"})
ddk_tasks.append(ddk_task)
# Build the list of inputs for electric field perturbation
# Each task is connected to all the previous DDK, DDE task and to the scf_task.
multi_dde = scf_task.input.make_dde_inputs(use_symmetries=False)
# To compute the nonlinear coefficients, all the directions of the perturbation
# have to be taken into consideration.
# DDE calculations
dde_tasks = []
dde_deps = {ddk_task: "DDK" for ddk_task in ddk_tasks}
dde_deps.update({scf_task: "WFK"})
for dde_inp in multi_dde:
dde_task = new.register_dde_task(dde_inp, deps=dde_deps)
dde_tasks.append(dde_task)
# DTE calculations
dte_deps = {scf_task: "WFK DEN"}
dte_deps.update({dde_task: "1WF 1DEN" for dde_task in dde_tasks})
multi_dte = scf_task.input.make_dte_inputs()
dte_tasks = []
for dte_inp in multi_dte:
dte_task = new.register_dte_task(dte_inp, deps=dte_deps)
dte_tasks.append(dte_task)
return new
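# Added comment (not in the original source): summary of the dependency graph
# built by this constructor.
#
#   scf_task  --WFK-->       each ddk_task
#   scf_task  --WFK-->       each dde_task   (plus DDK files from all ddk_tasks)
#   scf_task  --WFK, DEN-->  each dte_task   (plus 1WF/1DEN files from all dde_tasks)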
def on_all_ok(self):
"""
This method is called when all tasks reach S_OK.
It runs `mrgddb` sequentially on the local machine to produce
the final DDB file in the outdir of the `Work`.
"""
# Merge DDB files.
out_ddb = self.merge_ddb_files()
return self.Results(node=self, returncode=0, message="DDB merge done")
| mit |
aestrivex/mne-python | examples/plot_compute_mne_inverse.py | 21 | 1885 | """
================================================
Compute MNE-dSPM inverse solution on evoked data
================================================
Compute dSPM inverse solution on MNE evoked dataset
and store the solution in stc files for visualisation.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
from mne.minimum_norm import apply_inverse, read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori=None)
# Save result in stc files
stc.save('mne_%s_inverse' % method)
###############################################################################
# View activation time-series
plt.plot(1e3 * stc.times, stc.data[::100, :].T)
plt.xlabel('time (ms)')
plt.ylabel('%s value' % method)
plt.show()
# Plot brain in 3D with PySurfer if available
brain = stc.plot(hemi='rh', subjects_dir=subjects_dir)
brain.show_view('lateral')
# use peak getter to move visualization to the time point of the peak
vertno_max, time_idx = stc.get_peak(hemi='rh', time_as_index=True)
brain.set_data_time_index(time_idx)
# draw marker at maximum peaking vertex
brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue',
scale_factor=0.6)
brain.save_image('dSPM_map.png')
| bsd-3-clause |
rsivapr/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
mrshu/scikit-learn | sklearn/ensemble/forest.py | 1 | 49130 | """Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe, Brian Holt
# License: BSD 3
import itertools
import numpy as np
from warnings import warn
from abc import ABCMeta, abstractmethod
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..feature_selection.selector_mixin import SelectorMixin
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import array2d, check_random_state, check_arrays, safe_asarray
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from .base import BaseEnsemble
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor"]
MAX_INT = np.iinfo(np.int32).max
def _parallel_build_trees(n_trees, forest, X, y,
sample_mask, X_argsorted, seed, verbose):
"""Private function used to build a batch of trees within a job."""
random_state = check_random_state(seed)
trees = []
for i in xrange(n_trees):
if verbose > 1:
print("building tree %d of %d" % (i + 1, n_trees))
seed = random_state.randint(MAX_INT)
tree = forest._make_estimator(append=False)
tree.set_params(compute_importances=forest.compute_importances)
tree.set_params(random_state=check_random_state(seed))
if forest.bootstrap:
n_samples = X.shape[0]
indices = random_state.randint(0, n_samples, n_samples)
tree.fit(X[indices], y[indices],
sample_mask=sample_mask, X_argsorted=X_argsorted,
check_input=False)
tree.indices_ = indices
else:
tree.fit(X, y,
sample_mask=sample_mask, X_argsorted=X_argsorted,
check_input=False)
trees.append(tree)
return trees
def _parallel_predict_proba(trees, X, n_classes, n_outputs):
"""Private function used to compute a batch of predictions within a job."""
n_samples = X.shape[0]
if n_outputs == 1:
proba = np.zeros((n_samples, n_classes))
for tree in trees:
proba_tree = tree.predict_proba(X)
if n_classes == tree.n_classes_:
proba += proba_tree
else:
for j, c in enumerate(tree.classes_):
proba[:, c] += proba_tree[:, j]
else:
proba = []
for k in xrange(n_outputs):
proba.append(np.zeros((n_samples, n_classes[k])))
for tree in trees:
proba_tree = tree.predict_proba(X)
for k in xrange(n_outputs):
if n_classes[k] == tree.n_classes_[k]:
proba[k] += proba_tree[k]
else:
for j, c in enumerate(tree.classes_[k]):
proba[k][:, c] += proba_tree[k][:, j]
return proba
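# Added illustration (not part of the original source): with bootstrap samples a
# tree may have been fitted on a resample that misses some classes, so its
# probability matrix has fewer columns than the forest. The loops above realign
# the columns through ``tree.classes_``. For instance, with forest classes
# [0, 1, 2] and a tree that only saw classes [0, 2] (tree.classes_ == [0, 2]),
# the tree's column 0 is added to proba[:, 0] and its column 1 to proba[:, 2],
# while proba[:, 1] receives no contribution from that tree.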
def _parallel_predict_regression(trees, X):
"""Private function used to compute a batch of predictions within a job."""
return sum(tree.predict(X) for tree in trees)
def _partition_trees(forest):
"""Private function used to partition trees between jobs."""
# Compute the number of jobs
if forest.n_jobs == -1:
n_jobs = min(cpu_count(), forest.n_estimators)
else:
n_jobs = min(forest.n_jobs, forest.n_estimators)
# Partition trees between jobs
n_trees = [int(forest.n_estimators / n_jobs)] * n_jobs
for i in xrange(forest.n_estimators % n_jobs):
n_trees[i] += 1
starts = [0] * (n_jobs + 1)
for i in xrange(1, n_jobs + 1):
starts[i] = starts[i - 1] + n_trees[i - 1]
return n_jobs, n_trees, starts
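# Added worked example (not part of the original source): with
# forest.n_estimators == 10 and n_jobs == 3, the code above yields
# n_trees == [4, 3, 3] and starts == [0, 4, 7, 10]; job i builds n_trees[i]
# trees, and starts is later used to slice self.estimators_ when predicting
# in parallel.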
def _parallel_X_argsort(X):
"""Private function used to sort the features of X."""
return np.asarray(np.argsort(X.T, axis=1).T, dtype=np.int32, order="F")
def _partition_features(forest, n_total_features):
"""Private function used to partition features between jobs."""
# Compute the number of jobs
if forest.n_jobs == -1:
n_jobs = min(cpu_count(), n_total_features)
else:
n_jobs = min(forest.n_jobs, n_total_features)
# Partition features between jobs
n_features = [n_total_features / n_jobs] * n_jobs
for i in xrange(n_total_features % n_jobs):
n_features[i] += 1
starts = [0] * (n_jobs + 1)
for i in xrange(1, n_jobs + 1):
starts[i] = starts[i - 1] + n_features[i - 1]
return n_jobs, n_features, starts
class BaseForest(BaseEnsemble, SelectorMixin):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.compute_importances = compute_importances
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.feature_importances_ = None
self.verbose = verbose
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = array2d(X, dtype=np.float32, order='C')
return np.array([est.tree_.apply(X) for est in self.estimators_]).T
def fit(self, X, y):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (integers that correspond to classes in
classification, real numbers in regression).
Returns
-------
self : object
Returns self.
"""
self.random_state = check_random_state(self.random_state)
# Precompute some data
X, y = check_arrays(X, y, sparse_format="dense")
if (getattr(X, "dtype", None) != DTYPE or
X.ndim != 2 or not X.flags.fortran):
X = array2d(X, dtype=DTYPE, order="F")
n_samples, self.n_features_ = X.shape
if self.bootstrap:
sample_mask = None
X_argsorted = None
else:
if self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
sample_mask = np.ones((n_samples,), dtype=np.bool)
n_jobs, _, starts = _partition_features(self, self.n_features_)
all_X_argsorted = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_X_argsort)(
X[:, starts[i]:starts[i + 1]])
for i in xrange(n_jobs))
X_argsorted = np.asfortranarray(np.hstack(all_X_argsorted))
y = np.atleast_1d(y)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity,
# unlike [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if isinstance(self.base_estimator, ClassifierMixin):
y = np.copy(y)
if self.n_outputs_ == 1:
self.classes_ = np.unique(y)
self.n_classes_ = len(self.classes_)
else:
self.classes_ = []
self.n_classes_ = []
for k in xrange(self.n_outputs_):
unique = np.unique(y[:, k])
self.classes_.append(unique)
self.n_classes_.append(unique.shape[0])
y[:, k] = np.searchsorted(unique, y[:, k])
else:
if self.n_outputs_ == 1:
self.classes_ = None
self.n_classes_ = 1
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Assign chunk of trees to jobs
n_jobs, n_trees, _ = _partition_trees(self)
# Parallel loop
all_trees = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_build_trees)(
n_trees[i],
self,
X,
y,
sample_mask,
X_argsorted,
self.random_state.randint(MAX_INT),
verbose=self.verbose)
for i in xrange(n_jobs))
# Reduce
self.estimators_ = [tree for tree in itertools.chain(*all_trees)]
# Calculate out of bag predictions and score
if self.oob_score:
if isinstance(self, ClassifierMixin):
self.oob_decision_function_ = []
self.oob_score_ = 0.0
n_classes_ = self.n_classes_
classes_ = self.classes_
if self.n_outputs_ == 1:
n_classes_ = [n_classes_]
classes_ = [classes_]
predictions = []
for k in xrange(self.n_outputs_):
predictions.append(np.zeros((n_samples,
n_classes_[k])))
for estimator in self.estimators_:
mask = np.ones(n_samples, dtype=np.bool)
mask[estimator.indices_] = False
p_estimator = estimator.predict_proba(X[mask, :])
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in xrange(self.n_outputs_):
predictions[k][mask, :] += p_estimator[k]
for k in xrange(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
self.oob_decision_function_.append(decision)
self.oob_score_ += (np.mean(y[:, k] ==
classes_[k].take(
np.argmax(predictions[k], axis=1),
axis=0)))
if self.n_outputs_ == 1:
self.oob_decision_function_ = \
self.oob_decision_function_[0]
self.oob_score_ /= self.n_outputs_
else:
# Regression:
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
mask = np.ones(n_samples, dtype=np.bool)
mask[estimator.indices_] = False
p_estimator = estimator.predict(X[mask, :])
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[mask, :] += p_estimator
n_predictions[mask, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in xrange(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k], predictions[:, k])
self.oob_score_ /= self.n_outputs_
# Sum the importances
if self.compute_importances:
self.feature_importances_ = \
sum(tree.feature_importances_ for tree in self.estimators_) \
/ self.n_estimators
return self
class ForestClassifier(BaseForest, ClassifierMixin):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is computed as the majority
prediction of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
n_samples = len(X)
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in xrange(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are
ordered by arithmetical order.
"""
# Check data
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE)
# Assign chunk of trees to jobs
n_jobs, n_trees, starts = _partition_trees(self)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_proba)(
self.estimators_[starts[i]:starts[i + 1]],
X,
self.n_classes_,
self.n_outputs_)
for i in xrange(n_jobs))
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in xrange(1, len(all_proba)):
proba += all_proba[j]
proba /= self.n_estimators
else:
for j in xrange(1, len(all_proba)):
for k in xrange(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in xrange(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample are computed as
the mean predicted class log-probabilities of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. Classes are
ordered by arithmetical order.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in xrange(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(BaseForest, RegressorMixin):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y: array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
X = array2d(X, dtype=DTYPE)
# Assign chunk of trees to jobs
n_jobs, n_trees, starts = _partition_trees(self)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_regression)(
self.estimators_[starts[i]:starts[i + 1]], X)
for i in xrange(n_jobs))
# Reduce
y_hat = sum(all_y_hat) / self.n_estimators
return y_hat
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of classical
decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
max_features : int, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If "auto", then `max_features=sqrt(n_features)` on
classification tasks and `max_features=n_features` on regression
problems.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
compute_importances : boolean, optional (default=False)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeClassifier
The collection of fitted sub-estimators.
`feature_importances_` : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_decision_function_` : array, shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=True,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
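# Minimal usage sketch (added comment, not part of the original module; the
# iris dataset and variable names are illustrative):
#
#   from sklearn.datasets import load_iris
#   from sklearn.ensemble import RandomForestClassifier
#   iris = load_iris()
#   clf = RandomForestClassifier(n_estimators=10, random_state=0)
#   clf.fit(iris.data, iris.target)
#   print(clf.predict(iris.data[:5]))        # predicted class labels
#   print(clf.predict_proba(iris.data[:5]))  # per-class probabilities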
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classical
decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
max_features : int, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If "auto", then `max_features=sqrt(n_features)` on
classification tasks and `max_features=n_features`
on regression problems.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
compute_importances : boolean, optional (default=False)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
`feature_importances_` : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_prediction_` : array, shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=True,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
max_features : int, string or None, optional (default="auto")
The number of features to consider when looking for the best split.
- If "auto", then `max_features=sqrt(n_features)` on
classification tasks and `max_features=n_features`
on regression problems.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
compute_importances : boolean, optional (default=False)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeClassifier
The collection of fitted sub-estimators.
`feature_importances_` : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_decision_function_` : array, shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
Note: this parameter is tree-specific.
max_features : int, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If "auto", then `max_features=sqrt(n_features)` on
classification tasks and `max_features=n_features`
on regression problems.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
compute_importances : boolean, optional (default=False)
Whether feature importances are computed and stored into the
``feature_importances_`` attribute when calling fit.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeRegressor
The collection of fitted sub-estimators.
`feature_importances_` : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
`oob_score_` : float
Score of the training dataset obtained using an out-of-bag estimate.
`oob_prediction_` : array, shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
max_features="auto",
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=bootstrap,
compute_importances=compute_importances,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = max_features
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as trees in the forest.
The dimensionality of the resulting representation is approximately
``n_estimators * 2 ** max_depth``.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
Maximum depth of each tree.
min_samples_split : integer, optional (default=1)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less than
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_density : float, optional (default=0.1)
This parameter controls a trade-off in an optimization heuristic. It
controls the minimum density of the `sample_mask` (i.e. the
fraction of samples in the mask). If the density falls below this
threshold the mask is recomputed and the input data is packed
which results in data copying. If `min_density` equals to one,
the partitions are always represented as copies of the original
data. Otherwise, partitions are represented as bit masks (aka
sample masks).
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel. If -1, then the number of jobs
is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
Attributes
----------
`estimators_`: list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=1,
min_samples_leaf=1,
min_density=0.1,
n_jobs=1,
random_state=None,
verbose=0):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_density",
"max_features", "random_state"),
bootstrap=False,
compute_importances=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_density = min_density
self.max_features = 1
def fit(self, X, y=None):
"""Fit estimator.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input data used to build forests.
"""
self.fit_transform(X, y)
return self
def fit_transform(self, X, y=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input data used to build forests.
Returns
-------
X_transformed: sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = safe_asarray(X)
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y)
self.one_hot_encoder_ = OneHotEncoder()
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input data to be transformed.
Returns
-------
X_transformed: sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
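# Minimal usage sketch (added comment, not part of the original module; the
# random data below is illustrative):
#
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = rng.rand(100, 2)
#   hasher = RandomTreesEmbedding(n_estimators=10, max_depth=3, random_state=0)
#   X_sparse = hasher.fit_transform(X)  # sparse matrix of one-hot leaf codes
#   X_new = hasher.transform(rng.rand(10, 2))  # same encoding for new data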
| bsd-3-clause |
1a1a11a/mimircache | PyMimircache/__init__.py | 1 | 1336 | # coding=utf-8
""" PyMimircache a cache trace analysis platform.
.. moduleauthor:: Juncheng Yang <[email protected]>, Ymir Vigfusson
"""
import os
import sys
try:
import matplotlib
matplotlib.use('Agg')
except Exception as e:
print("WARNING: {}, fail to import matplotlib, "
"plotting function may be limited".format(e), file=sys.stderr)
cwd = os.getcwd()
sys.path.extend([cwd, os.path.join(cwd, "..")])
from PyMimircache.const import *
if not INSTALL_PHASE:
from PyMimircache.cacheReader.binaryReader import BinaryReader
from PyMimircache.cacheReader.vscsiReader import VscsiReader
from PyMimircache.cacheReader.csvReader import CsvReader
from PyMimircache.cacheReader.plainReader import PlainReader
from PyMimircache.profiler.cLRUProfiler import CLRUProfiler as CLRUProfiler
from PyMimircache.profiler.cGeneralProfiler import CGeneralProfiler
from PyMimircache.profiler.pyGeneralProfiler import PyGeneralProfiler
from PyMimircache.profiler.cHeatmap import CHeatmap
from PyMimircache.profiler.pyHeatmap import PyHeatmap
from PyMimircache.top.cachecow import Cachecow
from PyMimircache.version import __version__
# import logging
# logging.basicConfig(filename="log", filemode='w', format='%(levelname)s:%(asctime)s:%(message)s', level=logging.DEBUG)
| gpl-3.0 |
lrq3000/author-detector | authordetector/run.py | 1 | 33417 | #!/usr/bin/env python
# encoding: utf-8
#
# AuthorDetector
# Copyright (C) 2013 Larroque Stephen
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from auxlib import *
from authordetector.configparser import ConfigParser
import os, sys, StringIO
import pandas as pd
import time
import traceback
from collections import OrderedDict
json = import_module('ujson')
if json is None:
json = import_module('json')
if json is None:
raise RuntimeError('Unable to find a json implementation')
class Runner:
rootdir = 'authordetector'
## @var vars contains a dynamic dict of variables used for data mining, and will be passed to every other computational function
vars = {} # we create a reference at startup so that this dict can be passed as a reference to children objects
## Initialize a runner object, with all constructs necessary to use the algorithms and process data according to the provided configuration file and commandline arguments
# @param args recognized and processed commandline arguments
# @param extras extra commandline arguments that are not explicitly recognized (but will nevertheless be appended to the config file, so that you can overwrite pretty much any configuration parameter you want at commandline)
def init(self, args, extras):
self.vars = dict()
#-- Loading config
self.config = ConfigParser()
configfile = args['config']; del args['config'] # delete the config argument, which is at best a self reference
self.config.init(configfile)
self.config.load(args, extras, comments=True)
#-- Loading classes
for (submod, classname) in self.config.config["classes"].iteritems(): # for each item/module specified in classes
localname = submod
if self.config.get('classes_alias') and self.config.get('classes_alias').get(localname):
submod = self.config.get('classes_alias').get(localname)
else:
submod = localname
# if it is a list of classes, we load all the classes into a local list of classes
if type(classname) == type(list()):
#self.__dict__[localname] = {} # initializing the local list of classes
# for each class in the list
for oneclassname in classname:
# we add the class
self.addclass(localname, submod, oneclassname, True)
# if a special string "all" is supplied, we load all the classes into a local list of classes (if a class with a similar name to the filename exists - the classname can have mixed case, the case doesn't matter if the filename corresponds to the classname.lower())
elif classname == 'all':
#self.__dict__[localname] = {} # initializing the local list of classes
modlist = os.listdir(os.path.join(self.rootdir, submod)) # loading the list of files/submodules
modlist = list(set([os.path.splitext(mod)[0] for mod in modlist])) # strip out the extension + get only unique values (else we will get .py and .pyc filenames, which are in fact the same module)
# Trim out the base class and __init__
# Remove all base modules (modules whose names starts with 'base')
[modlist.remove(mod) for mod in modlist if mod.startswith('base')]
# Remove all __init__ modules (these are only used by the python interpreter)
if '__init__' in modlist:
modlist.remove('__init__')
# For each submodule
for classname2 in modlist:
full_package = '.'.join([self.rootdir, submod, classname2.lower()])
mod = import_module(full_package) # we need to load the package before being able to list the classes contained inside
# We list all objects contained in this module (normally we expect only one class, but nevermind)
for iclass, iclassname in [(obj, obj.__name__) for obj in [getattr(mod, name) for name in dir(mod)] if isinstance(obj, type)]:
# If the object is a class, and the class name is the same as the filename, we add an instance of this class!
if iclassname.lower() == classname2.lower():
# we add the class
self.addclass(localname, submod, iclassname, True)
# if we just have a string, we add the class that corresponds to this string
# Note: the format must be "submodule": "ClassName", where submodule is the folder containing the subsubmodule, and "ClassName" both the class name, and the subsubmodule filename ("classname.py")
else:
self.addclass(localname, submod, classname)
return True
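# Illustrative example (added comment, not from the original source): a
# "classes" block in the config may mix the three forms handled above.
# The module and class names below are hypothetical.
#
#   "classes": {
#       "reader": "CsvReader",                      # -> self.reader
#       "preprocessor": ["Lowercase", "Tokenizer"], # -> self.preprocessor["lowercase"], ...
#       "features": "all"                           # -> one instance per module found in authordetector/features/
#   }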
## Instantiate dynamically a class and add it to the local dict
# @param name key property under which name the module will be accessible (eg: if name='reader', you will have self.reader) - can be set = submod in most cases
# @param submod name of the subfolder/submodule where the subsubmodule/class resides
# @param classname both the subsubmodule filename (eg: classname.py) and the class name (eg: ClassName)
# @param listofclasses if True, instantiate this class in a list of classes, instead of just directly a property of the current object (eg: instead of self.reader, you will get: self.reader["firstparser"], self.reader["secondparser"], etc...)
def addclass(self, name, submod, classname, listofclasses=False):
try:
# Import the class dynamically
aclass = import_class('.'.join([self.rootdir, submod, classname.lower()]), classname)
# If we have several classes, we append all of them in a subdict of the attribute assigned for this module ( eg: runner.MyModule[firstclass] )
if listofclasses:
# Create the attribute as an OrderedDict if it does not exist yet
if self.__dict__.get(name) is None: self.__dict__[name] = OrderedDict() # keep the order in which the items were given (all iterators will also follow the order). This allows to set in config the order in which we want the items to be executed.
# Managing several calls to the same class: if we try to call twice the same class (eg: DropEmptyFeatures) for example at the beginning of PreOptimization and also at the end, there will be only one call because we are using a dict to store the classes. Here we try to avoid that by renaming the class by appending a number at the end.
# If the class does not already exist in the dict, we can use the name as-is
if not classname.lower() in self.__dict__[name]:
self.__dict__[name][classname.lower()] = aclass(config=self.config, parent=self)
# Else the class already exists (and is called before in the routine), and thus we have to rename this one
else:
count = 2
# Try to rename and increment the counter until we can store the class
while 1:
# Ok, this class name + current counter does not exist, we can store it
newclassname = "%s_%s" % (classname.lower(), str(count))
if not newclassname in self.__dict__[name]:
self.__dict__[name][newclassname] = aclass(config=self.config, parent=self)
break # exit the While loop
# Else we increment the counter and continue
else:
count += 1
# Else we have only one class, we store it straight away in an attribute
else:
self.__dict__[name] = aclass(config=self.config, parent=self)
return True
except Exception, e:
package_full = '.'.join([self.rootdir, submod, classname.lower()])
print("CRITICAL ERROR: importing a class failed: classname: %s package: %s\nException: %s" % (package_full, classname, e.__repr__()))
traceback.print_exc() # print traceback
raise RuntimeError('Unable to import a class/module')
    ## Update the local dict of variables (self.vars)
    #
    # This function is used as a proxy to accept any number of returned arguments from functions; it stores them locally, and the vars dict is then passed on to children objects
def updatevars(self, dictofvars):
# Create the local vars dict if it does not exist
if not hasattr(self, 'vars'):
self.vars = {}
# Update/add the values inside dictofvars (if it is a dictionary of variables)
if type(dictofvars) == type(dict()):
self.vars.update(dictofvars) # add new variables from dict and merge updated values for already existing variables
        # Else, it may be a list, an object or just a scalar (meaning the function does not conform to the dev standards); we then cannot know where to put those results, so we memorize them as-is inside a "lastout" entry
        # In summary: unnamed variables get stored as temporary variables which may be overwritten at any time by subsequent functions
else:
# Delete the previous output
if self.vars.get("lastout", None): del self.vars["lastout"]
# Save this output
self.vars.update({"lastout": dictofvars})
## Generically call one object and its method (if obj is a list, it will call the method of each and every one of the modules in the list)
# @param obj Object or list of objects
# @param method Method to call in the object(s) (as string)
    # @param args Optional arguments to pass to the method (must be a dictionary, with the keys being the names of the variables)
# @param return_vars Return a value instead of updating the local vars dict
# @param verbose Print more details about the executed routine
# TODO: reduce the number of maintained dictionaries (there are 4: self.vars, allvars, dictofvars and args)
    # TODO: fix return_vars; it does work, but sometimes there is a bleeding effect (mixing up local variables and argument variables). The best would be to use local variables where needed, but use argument variables foremost, and keep track of argument variables that are changed
def generic_call(self, obj, method, args=None, return_vars=False, verbose=False):
# Create the local dict of vars
allvars = dict() # input dict of vars
if return_vars: dictofvars = dict() # output dict of vars (if return_vars is True)
# Append the optional arguments to pass to methods
if args is not None and type(args) == dict:
allvars.update(args)
if return_vars: dictofvars.update(args) # args and dictofvars must have ascendance over everything else when using return_vars
# If we have a list of modules to call, we call the method of each and every one of those modules
if isinstance(obj, (dict, OrderedDict)):
# For every module in the list
for submodule in obj.itervalues():
# Print infos
if verbose:
print("Routine: Calling module %s..." % submodule.__class__.__name__)
sys.stdout.flush()
# Update the local dict of vars
allvars.update(self.vars)
if return_vars: allvars.update(dictofvars)
# Get the callable object's method
fullfunc = getattr(submodule, method)
# Call the specified function for the specified module
if not return_vars:
# By default we store in the local dict
self.updatevars(fullfunc(**allvars))
else:
# Else we update a temporary dictofvars and we return it at the end
dictofvars.update(fullfunc(**allvars))
                # Force flushing the text into the terminal
sys.stdout.flush()
# Return the dictofvars at the end of the loop if the user wants to return the variables to the caller instead of storing them locally
if return_vars:
allvars.update(dictofvars) # return the input vars updated with the outputvars
return allvars
# Else if it is an object (thus only one module to call), we directly call its method
else:
# Print infos
if verbose: print("Routine: Calling module %s..." % obj.__class__.__name__)
# Get the callable object's method
fullfunc = getattr(obj, method)
# Update the local dict of vars
allvars.update(self.vars)
if return_vars: allvars.update(dictofvars)
# Call the specified function for the specified module
if not return_vars:
self.updatevars(fullfunc(**allvars))
else:
allvars.update(fullfunc(**allvars)) # return the input vars updated with the outputvars
return allvars
            # Force flushing the text into the terminal
sys.stdout.flush()
## Execute a routine: call any module(s) given a list of dicts containing {"submodule name": "method of the class to call"}
# @param executelist A list containing the sequence of modules to launch (Note: the order of the contained elements matters!)
# @param verbose Print more details about the executed routine
def execute(self, executelist, verbose=False):
# Checking constraints first
if not self.check_constraints():
print("FATAL ERROR while checking constraints. Please check your configuration. Exiting.")
return False
# Loop through all modules in run_learn list
for mod in executelist:
# Catch exceptions: if a module fails, we continue onto the next one - TODO: try to set this option ("robust") in a config variable: for dev we want exceptions, in production maybe not (just a warning and then pass).
#try:
# Special case: this is a sublist, we run all the modules in the list in parallel
if type(mod) == type(list()):
self.generic_call(mod, verbose=verbose) # TODO: launch each submodule in parallel (using subprocess or threading, but be careful: Python's threads aren't efficient so this is not useful at all, and subprocess creates a new object, so how to communicate the computed/returned variables efficiently in memory?)
else:
# If it's a dict (specifying the module type and the method to call, format: {"moduletype":"method"})
if isinstance(mod, dict):
# Unpacking the dict
module = mod.keys()[0]
func = mod.values()[0]
# Else if it's a string, thus there's only the module type, we will call the default public method
elif isinstance(mod, basestring):
module = mod # it's just a string, the name of the category of modules to call
# For the method it's a bit more tricky: we try to get the publicmethod, declared in the base class of each category of modules (and thus inherited by modules)
                # Special case: we defined multiple modules to load in "classes" config for this category of modules, so we just get publicmethod from the first module in the dict
if isinstance (self.__dict__[module], (dict, OrderedDict)):
func = self.__dict__[module].itervalues().next().publicmethod
# Else it's a single module, we can get the publicmethod right away
else:
func = self.__dict__[module].publicmethod
# Else it's not a recognized format, we pass
else:
continue
# Call the module's method
self.generic_call(self.__dict__[module], func, verbose=verbose)
#except Exception, e:
#print "Exception when executing the routine: %s" % str(e)
            # Force flushing the text into the terminal
sys.stdout.flush()
return True
## Write down the parameters into a file
# Format of the file: json structure consisting of a dict where the keys are the names of the vars, and the values are strings encoding the data in csv format
# TODO: replace by pandas.to_json() when the feature will be put back in the main branch?
@staticmethod
def save_vars(jsonfile, dictofvars, exclude=None):
## Simple function to insert an item in either a dict or a list
def addtolistordict(finaldict, key, item):
if isinstance(finaldict, (dict)):
finaldict[key] = item
elif isinstance(finaldict, (list, tuple)):
finaldict.insert(key, item)
## Recursively convert variables into a json intelligible format
def convert_vars(dictofvars, exclude=None):
# Loading the correct generator depending on the type of dictofvars
# If it's a dict we iter over items
if (isinstance(dictofvars, dict)):
iter = dictofvars.iteritems()
finaldict = dict()
# If it's a list we enumerate it
elif (isinstance(dictofvars, (list, tuple))):
iter = enumerate(dictofvars)
finaldict = list()
# For each object in our dict of variables
for (key, item) in iter:
try:
# If this variable is in the exclude list, we skip it
if exclude and key in exclude:
continue
# Try to save the pandas object as CSV
#try:
# Only try if there is a method to_csv()
# TODO: replace by pandas.to_json() when the feature will be put back in the main branch?
if (hasattr(item, 'to_csv')):
out = StringIO.StringIO()
item.to_csv(out)
addtolistordict(finaldict, key, out.getvalue())
# Else it is probably not a Pandas object since this method is not available, we just save the value as-is or recursively convert pandas objects if possible
else:
# If possible, try to convert the item to a list
if (hasattr(item, 'tolist')):
item = item.tolist()
# If this is a recursive object, try to convert the variables inside (they may be pandas objects)
if (isinstance(item, (list, dict, tuple)) and not isinstance(item, basestring)):
addtolistordict(finaldict, key, convert_vars(item))
# Else just save the item as-is
else:
addtolistordict(finaldict, key, item)
# Else if it is not a pandas object, we save as-is
except Exception, e:
addtolistordict(finaldict, key, item)
print("Notice: couldn't correctly convert the value for the key %s. The value will be saved as-is. Error: %s" % (key, e))
pass
return finaldict
# Convert recursively the dict of vars
finaldict = convert_vars(dictofvars, exclude)
# Save the dict of csv data as a JSON file
try:
f = open(jsonfile, 'wb') # open in binary mode to avoid line returns translation (else the reading will be flawed!). We have to do it both at saving and at reading.
f.write( json.dumps(finaldict, sort_keys=True, indent=4) ) # write the file as a json serialized string, but beautified to be more human readable
f.close()
return True
except Exception, e:
print("Exception while trying to save the parameters into the parameters file: %s. The parameters have not been saved!" % e)
return False
## Load the parameters from a file
# Format of the file: json structure consisting of a dict where the keys are the names of the vars, and the values are strings encoding the data in csv format
# TODO: replace by pandas.from_json() when the feature will be put back in the main branch?
# @param jsonfile Path to the json file containing the variables to load
# @param prefixkey A prefix to prepend to the root keys (only for the root variables!)
@staticmethod
def load_vars(jsonfile, prefixkey=None):
## Simple function to insert an item in either a dict or a list
def addtolistordict(finaldict, key, item):
if isinstance(finaldict, (dict)):
finaldict[key] = item
elif isinstance(finaldict, (list, tuple)):
finaldict.insert(key, item)
## Convert back variables and returns a dict
# This is mainly because we need to convert back pandas objects, because pandas does not provide a to_json() function anymore
# TODO: replace all this by a simple to_json() when it will be fixed in Pandas?
# @param d Can be either a dict or a list
# @param prefixkey A prefix to prepend to the root keys (only for the root variables!)
def convert_vars(d, prefixkey=None, level=0):
# Loading the correct generator depending on the type of dictofvars
# If it's a dict we iter over items
if (isinstance(d, dict)):
iter = d.iteritems()
dictofvars = dict()
# If it's a list we enumerate it
elif (isinstance(d, (list, tuple))):
iter = enumerate(d)
dictofvars = list()
# For each item in the json
for key, item in iter:
# Prepend the prefix to key if specified, and if we are at the root (we don't prefix below)
if prefixkey and isinstance(prefixkey, (basestring, str)) and level == 0:
key = prefixkey + key
# TODO: Pandas objects are stored in a string for the moment because to_json() was removed. Fix this with a more reliable way to decode those structures in the future.
if (isinstance(item, basestring)):
# Try to load a pandas object (Series or DataFrame)
try:
buf = StringIO.StringIO(item)
df = pd.read_csv(buf, index_col=0, header=0) # by default, load as a DataFrame
# if in fact it's a Series (a vector), we reload as a Series
# TODO: replace all this by pd.read_csv(buf, squeeze=True) when squeeze will work!
if df.shape[1] == 1:
buf.seek(0)
addtolistordict(dictofvars, key, pd.Series.from_csv(buf))
# Failsafe: in case we tried to load a Series but it didn't work well (pandas will failsafe and return the original string), we finally set as a DataFrame
if (type(dictofvars[key]) != type(pd.Series()) and type(dictofvars[key]) != type(pd.DataFrame()) or dictofvars[key].dtype == object ): # if it's neither a Series nor DataFrame, we expect the item to be a DataFrame and not a Series
addtolistordict(dictofvars, key, df)
# Else if it is really a DataFrame, we set it as DataFrame
else:
if (not df.empty):
addtolistordict(dictofvars, key, df)
# In the case it is really a string (the resulting pandas object is empty), we just store the string as-is
else:
addtolistordict(dictofvars, key, item)
# If it didn't work well, we load the object as-is (maybe it's simply a string)
except Exception, e:
addtolistordict(dictofvars, key, item)
print("Exception: couldn't correctly load the value for the key %s. Error: %s. This item will be skipped." % (key, e))
pass
# Else it is already a converted Python object (eg: a list, a dict, a number, etc...), we just use it as-is
else:
if isinstance(item, (dict, list, tuple)) and not isinstance(item, basestring):
addtolistordict(dictofvars, key, convert_vars(item, level=level+1))
else:
addtolistordict(dictofvars, key, item)
return dictofvars
# Open the file
with open(jsonfile, 'rb') as f:
filecontent = f.read()
# Load the json tree
jsontree = json.loads(filecontent)
# Convert recursively the dict of vars (for pandas objects)
dictofvars = convert_vars(jsontree, prefixkey)
# Return the list of variables/parameters
return dictofvars
## Check constraints integrity based on classes' definitions
    # This will NOT stop execution; it only displays a warning that the configuration may be inconsistent and that errors may be encountered during execution. That may not be the case if the user knows what s/he's doing.
def check_constraints(self):
def constraint_error(submodname, constraint):
print("WARNING: in submodule %s constraint %s is not satisfied! Please check your config." % (submodname, constraint))
print("Checking constraints...")
sys.stdout.flush()
#== Checking workflow
print("Checking constraints in workflow...")
if self.vars['Mode'] == 'Learning':
routine = self.config.get('workflow_learn')
else:
routine = self.config.get('workflow')
prevmod = list() # list of the past modules
# Iterate over all modules categories
for mod in routine:
# mod is an item of the routine, and it can either be a string (module category name), or a dict (modname + method)
if isinstance(mod, (dict, OrderedDict)): # in this case, we need to unpack the name
modname = mod.iterkeys().next() # unpack the key
else: # else it's just a string, it's directly the name
modname = mod
# Get the module object
module = self.__dict__[modname]
# Little trick to do a for each loop in any case (in case we have only one submodule for this category of modules, or if we have a dict of submodules)
if (isinstance(module, (dict, OrderedDict))):
submods = module
            else: # only one submodule, we convert it to a dict
classname = module.__class__.__name__.lower()
submods = {classname: module}
# For each submodule
for submodname, submod in submods.iteritems():
# If some constraints are set for this submodule
if getattr(submod, 'constraints', None) is not None:
#-- Checking "after" constraint
if submod.constraints.get('after') is not None:
# If this submodule must be launched after another module, but this other module was not set before in the workflow, then warning
if submod.constraints['after'] not in prevmod:
constraint_error(submodname, "%s:%s" % ('after', submod.constraints['after']))
# Add current submodule name into the list of past modules
prevmod.append(submodname)
# Flush output
sys.stdout.flush()
# Add current module category into the list of past modules
prevmod.append(modname)
return True
## Learning routine: Train the system to learn how to detect cheating
def learn(self, executelist=None):
# Specify the mode
self.updatevars({'Mode': 'Learning'})
self.config.update({'Mode': 'Learning'})
# Reload the texts config
if self.__dict__.get('reader', None):
self.reader.reloadconfig() # make sure the textreader updates the list of texts it must load (depending on Mode)
# We can pass an execution list either as an argument (used for recursion) or in the configuration
if not executelist:
executelist = self.config.get('workflow_learn', None)
# Standard learning routine
# If no routine is given, then we execute the standard learning routine
if not executelist:
executelist = []
if self.__dict__.get('preprocessing', None):
executelist.append({"preprocessing": "process"})
executelist.append({"featuresextractor": "extract"})
if self.__dict__.get('patternsextractor', None):
executelist.append({"patternsextractor": "extract"})
if self.__dict__.get('merger', None):
executelist.append({"merger": "merge"})
if self.__dict__.get('postprocessing', None):
executelist.append({"postprocessing": "process"})
# Initialization, do various stuff
print("Initializing, this can take a few moments, please wait..."); sys.stdout.flush()
# Execute all modules of the routine (either of config['workflow_learn'] or the standard routine)
if not self.execute(executelist, verbose=True): # We generally prefer to print all infos when learning
return False
print('All done!')
# End of learning, we save the parameters if a parametersfile was specified
if self.config.get('parametersfile', None):
Runner.save_vars(self.config.get('parametersfile'), self.vars, ['X', 'Y', 'X_raw', 'Weights', 'Mode']) # save all vars but X and Y (which may be VERY big and aren't parameters anyway)
print('Learned parameters saved in: %s' % self.config.get('parametersfile'))
return True
## Detection routine: identify the labels for the unlabeled texts
def run(self, executelist=None):
# Specify the mode
self.updatevars({'Mode': 'Detection'})
self.config.update({'Mode': 'Detection'})
if self.__dict__.get('reader', None):
self.reader.reloadconfig() # make sure the textreader updates the list of texts it must load (depending on Mode)
# Load the parameters if a file is specified
if self.config.get('parametersfile', None):
self.updatevars(Runner.load_vars(self.config.config['parametersfile'], prefixkey='L_'))
# We can pass an execution list either as an argument (used for recursion) or in the configuration
if not executelist:
executelist = self.config.get('workflow', None)
# Standard detection routine
# If no routine is given, then we execute the standard detection routine
if not executelist:
executelist = []
if self.__dict__.get('preprocessing', None):
executelist.append({"preprocessing": "process"})
executelist.append({"featuresextractor": "extract"})
if self.__dict__.get('patternsextractor', None):
executelist.append({"patternsextractor": "extract"})
if self.__dict__.get('postprocessing', None):
executelist.append({"postprocessing": "process"})
executelist.append({"detector": "detect"})
# Execute all modules of the routine (either of config['workflow'] or the standard routine)
if not self.execute(executelist, verbose=True): # We generally prefer to print all infos
return False
# End of identification, we save the results in a file if specified
if self.config.get('resultsfile', None):
Runner.save_vars(self.config.get('resultsfile'), {'Result': self.vars.get('Result'), 'Result_details': self.vars.get('Result_details')}) # save the Result and Result_details variable
print('Identification results saved in: %s' % self.config.get('resultsfile'))
return True
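# Illustrative usage sketch (not executed anywhere): how a custom execution
# routine could be passed to execute(). Each item is either a dict mapping a
# module category to the method to call, or just the category name, in which
# case that category's publicmethod is used. The category names below
# ('preprocessing', 'featuresextractor', 'detector') are assumptions and must
# match the modules declared in your "classes" config.
def _example_custom_routine():
    runner = Runner()
    runner.init()
    custom_routine = [
        {"preprocessing": "process"},  # explicit method to call
        "featuresextractor",  # falls back to the category's publicmethod
        {"detector": "detect"},
    ]
    # verbose=True prints each submodule as it is called
    return runner.execute(custom_routine, verbose=True)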
if __name__ == '__main__':
runner = Runner()
runner.init()
runner.run() | gpl-3.0 |
dgwakeman/mne-python | examples/visualization/plot_ssp_projs_sensitivity_map.py | 18 | 1286 | """
==================================
Sensitivity map of SSP projections
==================================
This example shows the sources that have a forward field
similar to the first SSP vector correcting for ECG.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from mne import read_forward_solution, read_proj, sensitivity_map
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif'
fwd = read_forward_solution(fname, surf_ori=True)
projs = read_proj(ecg_fname)
projs = projs[3:][::2] # take only one projection per channel type
# Compute sensitivity map
ssp_ecg_map = sensitivity_map(fwd, ch_type='grad', projs=projs, mode='angle')
###############################################################################
# Show sensitivity map
plt.hist(ssp_ecg_map.data.ravel())
plt.show()
args = dict(clim=dict(kind='value', lims=(0.2, 0.6, 1.)), smoothing_steps=7,
hemi='rh', subjects_dir=subjects_dir)
ssp_ecg_map.plot(subject='sample', time_label='ECG SSP sensitivity', **args)
| bsd-3-clause |
1kastner/analyse_weather_data | interpolation/interpolator/neural_network_interpolator.py | 1 | 7784 | """
"""
import sys
import logging
import os.path
import datetime
import platform
import pandas
import numpy
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_squared_error
from filter_weather_data import PROCESSED_DATA_DIR
from interpolation.interpolator.logger import StreamToLogger
if platform.uname()[1].startswith("ccblade"): # the output files can grow to several gigabytes, so better not to store them
# on a network drive
PROCESSED_DATA_DIR = "/export/scratch/1kastner"
pandas.set_option("display.max_columns", 500)
pandas.set_option("display.max_rows", 10)
def cloud_cover_converter(val):
if val in ["SKC", "CLR", "NSC", "CAVOC"]: # 0 octas
return 0
elif val == "FEW": # 1-2 octas
return 1
elif val == "SCT": # 3-4 octas
return 2
elif val == "BKN": # 5-7 octas
return 3
elif val == "OVC": # 8 octas
return 4
elif val == "VV": # clouds can not be seen because of rain or fog
return 5
else:
raise RuntimeError(val + "not found")
def load_data(file_name, start_date, end_date, verbose=False):
"""
:param end_date:
:param start_date:
:param file_name: File name, e.g. training_data.csv, evaluation_data.csv
:return: (input_data, target) scikit-conform data
"""
csv_file = os.path.join(
PROCESSED_DATA_DIR,
"neural_networks",
file_name
)
data_df = pandas.read_csv(
csv_file,
index_col="datetime",
parse_dates=["datetime"],
converters={"cloudcover_eddh": cloud_cover_converter}
)
cloud_cover_df = pandas.get_dummies(data_df.cloudcover_eddh, prefix="cloudcover_eddh")
data_df.drop("cloudcover_eddh", axis=1, inplace=True)
cloud_cover_df.set_index(data_df.index, inplace=True)
df_hour = pandas.get_dummies(data_df.index.hour, prefix="hour")
df_hour.set_index(data_df.index, inplace=True)
data_df = data_df.assign(**{column: df_hour[column] for column in df_hour.columns})
data_df = data_df.assign(**{column: cloud_cover_df[column] for column in cloud_cover_df.columns})
data_df = data_df.loc[start_date:end_date]
data_df.reset_index(inplace=True, drop=True)
# this is now binary encoded, so no need for it anymore
# no data means no windgusts were measured, not the absence of measurement instruments
data_df["windgust_eddh"].fillna(0, inplace=True)
# drop columns with NaN, e.g. precipitation at airport is currently not reported at all
data_df.drop("precipitation_eddh", axis=1, inplace=True)
old_len = len(data_df)
# neural networks can not deal with NaN values
data_df.dropna(axis='index', how="any", inplace=True)
new_len = len(data_df)
logging.debug("old: %i, new: %i" % (old_len, new_len))
logging.debug("percentage: %i" % ((old_len / new_len) * 100))
# try to predict temperature
target_df = pandas.DataFrame(data_df.temperature)
# based on information served by airport + learned patterns, so no data from the same private weather station itself
input_df = data_df
for attribute in data_df.columns:
if (
not attribute.endswith("_eddh")
and attribute not in ("lat", "lon")
and not attribute.startswith("hour_")
and not attribute.startswith("month_")
and not "cloudcover" in attribute
):
input_df.drop(attribute, 1, inplace=True)
if verbose:
logging.debug(input_df.head(1))
logging.debug(target_df.head(1))
# only numpy arrays conform with scikit-learn
input_data = input_df.values
target = target_df.values
return input_data, target
def train(mlp_regressor, start_date, end_date, verbose=False):
input_data, target = load_data("training_data.csv", start_date, end_date, verbose=verbose)
if len(input_data) == 0 or len(target) == 0:
logging.warning("training failed because of lack of data")
load_data("training_data.csv", start_date, end_date, verbose=True)
return
mlp_regressor.fit(input_data, target)
predicted_values = mlp_regressor.predict(input_data)
score = numpy.sqrt(mean_squared_error(target, predicted_values))
logging.info("Training RMSE: %.3f" % score)
def evaluate(mlp_regressor, start_date, end_date, verbose=False):
input_data, target = load_data("evaluation_data.csv", start_date, end_date, verbose=verbose)
if len(input_data) == 0 or len(target) == 0:
logging.warning("training failed because of lack of data")
load_data("evaluation_data.csv", start_date, end_date, verbose=True)
return
predicted_values = mlp_regressor.predict(input_data)
score = numpy.sqrt(mean_squared_error(target, predicted_values))
logging.info("Evaluation RMSE: %.3f" % score)
def run_experiment(hidden_layer_sizes, number_months=12, learning_rate=.001):
"""
    :param hidden_layer_sizes: The hidden layers, e.g. (40, 10)
    :param number_months: how many consecutive months of 2016 to walk through (train on month i, validate on month i+1)
    :param learning_rate: initial learning rate passed to the MLPRegressor
    :return: None
"""
mlp_regressor = MLPRegressor(
hidden_layer_sizes=hidden_layer_sizes,
activation='relu', # most likely linear effects
solver='adam', # good choice for large data sets
alpha=0.0001, # L2 penalty (regularization term) parameter.
batch_size='auto',
learning_rate_init=learning_rate,
max_iter=200,
shuffle=True,
random_state=None,
tol=0.0001,
#verbose=True,
verbose=False,
warm_start=False, # erase previous solution
early_stopping=False, # stop if no increase during validation
validation_fraction=0.1, # belongs to early_stopping
beta_1=0.9, # solver=adam
beta_2=0.999, # solver=adam
epsilon=1e-08 # solver=adam
)
setup_logger(hidden_layer_sizes, learning_rate)
logging.info("hidden_layer_sizes=%s" % str(hidden_layer_sizes))
logging.info("learning_rate=%f" % learning_rate)
for month in range(1, number_months):
month_learned = "2016-%02i" % month
logging.info("learn month %s" % month_learned)
train(mlp_regressor, month_learned, month_learned, verbose=(month == 1))
month_not_yet_learned = "2016-%02i" % (month + 1)
logging.info("validate with month %s" % month_not_yet_learned)
evaluate(mlp_regressor, month_not_yet_learned, month_not_yet_learned)
logging.info(mlp_regressor.get_params())
logger = logging.getLogger()
handlers = logger.handlers[:]
for handler in handlers:
handler.close()
logger.removeHandler(handler)
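# Illustrative sketch (not called on import): comparing several hidden layer
# layouts with the walk-forward scheme of run_experiment. The layouts and
# learning rate below are arbitrary example values, not recommendations.
def _example_parameter_sweep():
    for layout in [(3,), (10,), (40, 10)]:
        run_experiment(layout, number_months=12, learning_rate=0.001)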
def setup_logger(hidden_layer_sizes, learning_rate):
log = logging.getLogger('')
log.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
log.addHandler(console_handler)
file_name = "interpolation_{date}_neural_network_{hidden_layer_sizes}_lr{lr}.log".format(
hidden_layer_sizes="-".join([str(obj) for obj in hidden_layer_sizes]),
date=datetime.datetime.now().isoformat().replace(":", "-").replace(".", "-"),
lr=learning_rate
)
path_to_file_to_log_to = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
"log",
file_name
)
file_handler = logging.FileHandler(path_to_file_to_log_to)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.propagate = False
sys.stderr = StreamToLogger(log, logging.ERROR)
log.info("### Start new logging")
return log
if __name__ == "__main__":
run_experiment((3,), number_months=2)
| agpl-3.0 |
mikebenfield/scikit-learn | examples/linear_model/plot_sparse_logistic_regression_20newsgroups.py | 56 | 4172 | """
=====================================================
Multiclass sparse logistic regression on 20newsgroups
=====================================================
Comparison of multinomial logistic L1 vs one-versus-rest L1 logistic regression
to classify documents from the 20newsgroups dataset. Multinomial logistic
regression yields more accurate results and is faster to train on the larger
scale dataset.
Here we use l1 sparsity to trim the weights of uninformative
features to zero. This is good if the goal is to extract the strongly
discriminative vocabulary of each class. If the goal is to get the best
predictive accuracy, it is better to use the non sparsity-inducing l2 penalty
instead.
A more traditional (and possibly better) way to predict on a sparse subset of
input features would be to use univariate feature selection followed by a
traditional (l2-penalised) logistic regression model.
"""
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
print(__doc__)
# Author: Arthur Mensch
t0 = time.clock()
# We use SAGA solver
solver = 'saga'
# Turn down for faster run time
n_samples = 10000
# fetch_20newsgroups_vectorized caches the downloaded data locally for faster access
dataset = fetch_20newsgroups_vectorized('all')
X = dataset.data
y = dataset.target
X = X[:n_samples]
y = y[:n_samples]
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42,
stratify=y,
test_size=0.1)
train_samples, n_features = X_train.shape
n_classes = np.unique(y).shape[0]
print('Dataset 20newsgroup, train_samples=%i, n_features=%i, n_classes=%i'
% (train_samples, n_features, n_classes))
models = {'ovr': {'name': 'One versus Rest', 'iters': [1, 3]},
'multinomial': {'name': 'Multinomial', 'iters': [1, 3, 7]}}
for model in models:
# Add initial chance-level values for plotting purpose
accuracies = [1 / n_classes]
times = [0]
densities = [1]
model_params = models[model]
# Small number of epochs for fast runtime
for this_max_iter in model_params['iters']:
print('[model=%s, solver=%s] Number of epochs: %s' %
(model_params['name'], solver, this_max_iter))
lr = LogisticRegression(solver=solver,
multi_class=model,
C=1,
penalty='l1',
fit_intercept=True,
max_iter=this_max_iter,
random_state=42,
)
t1 = time.clock()
lr.fit(X_train, y_train)
train_time = time.clock() - t1
y_pred = lr.predict(X_test)
accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
density = np.mean(lr.coef_ != 0, axis=1) * 100
accuracies.append(accuracy)
densities.append(density)
times.append(train_time)
models[model]['times'] = times
models[model]['densities'] = densities
models[model]['accuracies'] = accuracies
print('Test accuracy for model %s: %.4f' % (model, accuracies[-1]))
print('%% non-zero coefficients for model %s, '
'per class:\n %s' % (model, densities[-1]))
print('Run time (%i epochs) for model %s:'
'%.2f' % (model_params['iters'][-1], model, times[-1]))
fig = plt.figure()
ax = fig.add_subplot(111)
for model in models:
name = models[model]['name']
times = models[model]['times']
accuracies = models[model]['accuracies']
ax.plot(times, accuracies, marker='o',
label='Model: %s' % name)
ax.set_xlabel('Train time (s)')
ax.set_ylabel('Test accuracy')
ax.legend()
fig.suptitle('Multinomial vs One-vs-Rest Logistic L1\n'
'Dataset %s' % '20newsgroups')
fig.tight_layout()
fig.subplots_adjust(top=0.85)
run_time = time.clock() - t0
print('Example run in %.3f s' % run_time)
plt.show()
| bsd-3-clause |
johnbachman/rasmodel | src/REM/RAF_module/BRAF_module.py | 6 | 3777 | """ Detailed mechanistic model of BRAF based on Neal Rossen paper. """
from pysb import *
from pysb.util import alias_model_components
def monomers():
Monomer('BRAF', ['ras', 'd', 'vem', 'erk'])
Monomer('Vem', ['raf'])
# IC values
# --------
Parameter('BRAF_0', 1e5)
Parameter('Vem_0', 1000)
alias_model_components()
# Initial conditions
# ------------------
Initial(BRAF(d=None, ras=None, erk=None, vem=None), BRAF_0)
Initial(Vem(raf=None), Vem_0)
def BRAF_dynamics():
# Parameters
# -----------
Parameter('kaf', 1e-6)
Parameter('kar', 1)
Parameter('kbf', 0.5) # 1)
Parameter('kbr', 1e-11)
Parameter('kcf', 1)
Parameter('kcr', 0.0001)
Parameter('kdf', 1)
Parameter('kdr', 0.1)
Parameter('kef', 1e-2)
Parameter('ker', 0.1)
Parameter('kff', 1e-5)
Parameter('kfr', 1)
Parameter('kgf', 1e-11)
Parameter('kgr', 1)
Parameter('khf', 1e-2) # 100)
Parameter('khr', 1) # 1)
Parameter('koff', 1)
alias_model_components()
# Rules
# -----
# BRAF dimerization
Rule('BRAF_dimerization',
BRAF(d=None, ras=None) + BRAF(d=None, ras=None, vem=None) <>
BRAF(d=1, ras=None) % BRAF(d=1, ras=None, vem=None), kaf, kar)
# KRAS binding BRAF monomers
Rule('KRAS_binding_BRAF_monomers',
BRAF(ras=None, d=None) + KRAS(raf=None, state='gtp') <>
BRAF(ras=1, d=None) % KRAS(raf=1, state='gtp'), kdf, kdr)
# KRAS binding BRAF dimers
Rule('KRAS_binding_BRAF_dimers',
BRAF(ras=None, d=1) % BRAF(d=1) +
KRAS(raf=None, state='gtp') <>
BRAF(ras=2, d=1) % BRAF(d=1) %
KRAS(raf=2, state='gtp'), kbf, kbr)
# KRAS:BRAF dimerization
Rule('KRASBRAF_dimerization',
BRAF(d=None, ras=ANY) + BRAF(d=None, ras=ANY, vem=None) <>
BRAF(d=1, ras=ANY) % BRAF(d=1, ras=ANY, vem=None), kcf, kcr)
# BRAF:Vem dimerization to give 2(BRAF:Vem) g = a * f
Rule('BRAF_Vem_dimerization',
BRAF(d=None, ras=None, vem=ANY) + BRAF(d=None, ras=None, vem=ANY) <>
BRAF(d=1, ras=None, vem=ANY) % BRAF(d=1, ras=None, vem=ANY), kgf, kgr)
# KRAS:BRAF:Vem dimerization to give 2( KRAS:BRAF:Vem) h = c * a
Rule('KRAS_BRAF_Vem_dimerization',
BRAF(d=None, ras=ANY, vem=ANY) + BRAF(d=None, ras=ANY, vem=ANY) <>
BRAF(d=1, ras=ANY, vem=ANY) % BRAF(d=1, ras=ANY, vem=ANY), khf, khr)
# 1st Vemurafenib binds
Rule('First_binding_Vemurafenib',
BRAF(vem=None) % BRAF(vem=None) + Vem(raf=None) <>
BRAF(vem=1) % BRAF(vem=None) % Vem(raf=1), kef, ker)
# 2nd Vemurafenib binding
Rule('Second_binding_vemurafenib',
BRAF(vem=None) % BRAF(vem=ANY) + Vem(raf=None) <>
BRAF(vem=1) % BRAF(vem=ANY) % Vem(raf=1), kff, kfr)
# Vemurafenib binds BRAF monomer
Rule('Vemurafenib_binds_BRAF_monomer',
BRAF(vem=None, d=None) + Vem(raf=None) <>
BRAF(vem=1, d=None) % Vem(raf=1), kef, ker)
# Release KRAS:GDP from BRAF
Rule('KRAS_GDP_dissoc_BRAF',
KRAS(state='gdp', raf=1) % BRAF(ras=1) >>
KRAS(state='gdp', raf=None) + BRAF(ras=None), koff)
def observables():
# Observables
# ----------
Observable('BRAF_WT_active',
BRAF(d=ANY, vem=None))
Observable('BRAF_V600E_active',
BRAF(vem=None))
# if __name__ == '__main__':
# from pysb.integrate import Solver
# import matplotlib.pyplot as plt
# import numpy as np
# ts = np.linspace(0, 100, 100)
# solver = Solver(model, ts)
# solver.run()
# plt.figure()
# plt.plot(ts, solver.yobs['BRAF_WT_active'], label='WT')
# plt.plot(ts, solver.yobs['BRAF_V600E_active'], label='V600E')
# plt.legend()
# plt.show()
| mit |
davidkuep/pyiso | setup.py | 1 | 2414 | from setuptools import setup
import codecs
import os
import re
# to release:
# python setup.py register sdist bdist_egg upload
here = os.path.abspath(os.path.dirname(__file__))
# Read the version number from a source file.
# Why read it, and not import?
# see https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion
# https://github.com/pypa/sampleproject/blob/master/setup.py
def find_version(*file_paths):
# Open in Latin-1 so that we avoid encoding errors.
# Use codecs.open for Python 2 compatibility
with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
version_file = f.read()
# The version line must have the form
# __version__ = 'ver'
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# Get the long description from the relevant file
with codecs.open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(
name='pyiso',
packages=['pyiso'],
version=find_version('pyiso', '__init__.py'),
description='Python client libraries for ISO and other power grid data sources.',
long_description=long_description,
author='Anna Schneider',
author_email='[email protected]',
url='https://github.com/WattTime/pyiso',
license='Apache',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
test_suite='nose.collector',
install_requires=[
'beautifulsoup4',
'pandas>=0.15',
'python-dateutil',
'pytz',
'requests',
'celery>=3.1',
'xlrd',
'lxml',
'html5lib',
],
)
| apache-2.0 |
Neuroglycerin/neukrill-net-work | generate_hlf_testcache_6aug.py | 1 | 1993 | #!/usr/bin/env python
"""
Generate a cache of all the ComputerVision highlevelfeatures to send to Pylearn2
"""
from __future__ import division
import neukrill_net.augment
import neukrill_net.highlevelfeatures
import neukrill_net.utils
import copy
import numpy as np
from sklearn.externals import joblib
# Define output path
pkl_path1 = '/disk/data1/s1145806/cached_hlf_test6_data_raw.pkl'
pkl_path2 = '/disk/data1/s1145806/cached_hlf_test6_raw.pkl'
pkl_path3 = '/disk/data1/s1145806/cached_hlf_test6_data_ranged.pkl'
pkl_path4 = '/disk/data1/s1145806/cached_hlf_test6_ranged.pkl'
pkl_path5 = '/disk/data1/s1145806/cached_hlf_test6_data_posranged.pkl'
pkl_path6 = '/disk/data1/s1145806/cached_hlf_test6_posranged.pkl'
# Define which basic attributes to use
attrlst = ['height','width','numpixels','sideratio','mean','std','stderr',
'propwhite','propnonwhite','propbool']
# Parse the data
settings = neukrill_net.utils.Settings('settings.json')
X,y = neukrill_net.utils.load_rawdata(settings.image_fnames)
# Combine all the features we want to use
hlf_list = []
hlf_list.append( neukrill_net.highlevelfeatures.BasicAttributes(attrlst) )
hlf_list.append( neukrill_net.highlevelfeatures.ContourMoments() )
hlf_list.append( neukrill_net.highlevelfeatures.ContourHistogram() )
hlf_list.append( neukrill_net.highlevelfeatures.ThresholdAdjacency() )
hlf_list.append( neukrill_net.highlevelfeatures.ZernikeMoments() )
hlf_list.append( neukrill_net.highlevelfeatures.Haralick() )
# hlf_list.append( neukrill_net.highlevelfeatures.CoocurProps() )
augs = {'units': 'uint8',
'rotate': 3,
'rotate_is_resizable': 1,
'flip': 1}
aug_fun = neukrill_net.augment.augmentation_wrapper(**augs)
hlf = neukrill_net.highlevelfeatures.MultiHighLevelFeature(hlf_list, augment_func=aug_fun)
# Save the raw values of every feature
X_raw = hlf.generate_cache(X)
# Save the feature matrix to disk
joblib.dump(X_raw, pkl_path1)
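# Minimal sketch (unused helper) of how the cached feature matrix could be
# loaded back later, e.g. from a Pylearn2 dataset wrapper; the path defaults
# to the pkl_path1 defined above.
def _load_cached_features(path=pkl_path1):
    return joblib.load(path)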
| mit |
sonnyhu/scikit-learn | examples/ensemble/plot_partial_dependence.py | 54 | 4704 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [2]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [1]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; neither does the average
number of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [2] For classification you can think of it as the regression score before
the link function.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
def main():
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print(" done.")
print('Convenience plot with ``partial_dependence_plots``')
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features,
feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('Custom 3d plot via ``partial_dependence``')
fig = plt.figure()
target_feature = (1, 5)
pdp, axes = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[0].reshape(list(map(np.size, axes))).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
# Needed on Windows because plot_partial_dependence uses multiprocessing
if __name__ == '__main__':
main()
| bsd-3-clause |
fbagirov/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
appapantula/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
jwass/mplexporter | mplexporter/renderers/vincent_renderer.py | 64 | 1922 | import warnings
from .base import Renderer
from ..exporter import Exporter
class VincentRenderer(Renderer):
def open_figure(self, fig, props):
self.chart = None
self.figwidth = int(props['figwidth'] * props['dpi'])
self.figheight = int(props['figheight'] * props['dpi'])
def draw_line(self, data, coordinates, style, label, mplobj=None):
import vincent # only import if VincentRenderer is used
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
linedata = {'x': data[:, 0],
'y': data[:, 1]}
line = vincent.Line(linedata, iter_idx='x',
width=self.figwidth, height=self.figheight)
# TODO: respect the other style settings
line.scales['color'].range = [style['color']]
if self.chart is None:
self.chart = line
else:
warnings.warn("Multiple plot elements not yet supported")
def draw_markers(self, data, coordinates, style, label, mplobj=None):
import vincent # only import if VincentRenderer is used
if coordinates != 'data':
warnings.warn("Only data coordinates supported. Skipping this")
markerdata = {'x': data[:, 0],
'y': data[:, 1]}
markers = vincent.Scatter(markerdata, iter_idx='x',
width=self.figwidth, height=self.figheight)
# TODO: respect the other style settings
markers.scales['color'].range = [style['facecolor']]
if self.chart is None:
self.chart = markers
else:
warnings.warn("Multiple plot elements not yet supported")
def fig_to_vincent(fig):
"""Convert a matplotlib figure to a vincent object"""
renderer = VincentRenderer()
exporter = Exporter(renderer)
exporter.run(fig)
return renderer.chart
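# Hypothetical usage sketch (requires matplotlib and vincent to be installed):
# convert a simple matplotlib figure and write the resulting Vega spec to disk.
# The output filename is an arbitrary example.
def _example_fig_to_vincent():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([1, 2, 3], [4, 2, 5], color='red')
    chart = fig_to_vincent(fig)
    chart.to_json('chart.json')  # vincent's serializer for the Vega grammar
    return chart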
| bsd-3-clause |
raghavrv/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
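# --- Illustrative addition (not part of the original module) ---------------
# A hedged sketch of what raise_build_error does with an import failure: the
# original exception is re-raised as an ImportError whose message appends the
# listing of this package directory. The error text below is invented.
def _example_raise_build_error():
    try:
        raise_build_error(ImportError("_check_build extension is missing"))
    except ImportError as exc:
        return str(exc)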
| bsd-3-clause |
anisfeld/MachineLearning | Pipeline/util.py | 1 | 2394 | import pandas as pd
import re
from random import sample
# Helper Functions
def check_nulls(df, col):
'''
returns df with NaN in specified column(s)
'''
return df.ix[df.ix[:,col].isnull()]
def get_notnulls(df, col):
'''
returns df without NaN in specified column(s)
'''
return df.ix[df.ix[:,col].notnull()]
def clean_data(df, cleaning_tuples):
'''
replace a string in a column (pat) with a clean string (repl):
e.g. cleaning_tuples = [(col, pat, repl)]
'''
for col, pat, repl in cleaning_tuples:
df.ix[:,col] = df.ix[:,col].str.replace(pat, repl)
def clean_grouped_data(grouped_df,col=0):
'''
returns df with counts that result from groupby
'''
counts = pd.DataFrame(grouped_df.count().ix[:,col])
counts = counts.unstack()
counts.columns = counts.columns.droplevel()
counts.columns.name = None
counts.index.name = None
counts.fillna(0, inplace=True)
return counts
def combine_cols(df, col, extra):
'''
Inputs:
df (pd.DataFrame)
col,extra (string) column names
Combines columns with similar information into a single column and drops extra.
'''
df.ix[:,col] = df.ix[:,col].where(df.ix[:,col].notnull(), df.ix[:,extra])
df.drop(extra, axis=1, inplace=True)
def get_subsample(x, n, method = "sample"):
'''
input:
x (array-like)
n (numeric) sample size
        method keyword that determines how to subsample ("sample", "head")
'''
if n > len(x):
return "ERROR: n > len(x)"
#Look into ways of passing part of a function name to call the function?
# e.g. pass sample, do pd.DataFrame.sample(df, n)
if method == "sample":
return x.sample(n)
elif method == "head":
return x.head(n)
def camel_to_snake(column_name):
"""
converts a string that is camelCase into snake_case
Example:
print camel_to_snake("javaLovesCamelCase")
> java_loves_camel_case
See Also:
http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-camel-case
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', column_name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def map_camel_to_snake(s):
'''
Converts a series of strings in camelCase to snake_case
'''
return s.map(camel_to_snake)
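# --- Illustrative addition (not part of the original module) ---------------
# A hedged usage sketch of the helpers above on a tiny made-up DataFrame; the
# column names and values are invented purely for demonstration.
def _example_pipeline_util_usage():
    df = pd.DataFrame({'firstName': ['ann', None, 'bo'],
                       'zipCode': [1.0, 2.0, None]})
    df.columns = [camel_to_snake(c) for c in df.columns]  # first_name, zip_code
    missing = check_nulls(df, 'first_name')    # rows where first_name is NaN
    present = get_notnulls(df, 'first_name')   # rows where first_name is set
    small = get_subsample(df, 2, method='head')
    return missing, present, small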
| mit |
feuerchop/increOCSVM | results/profile_mnist/line_profiler_mnist_10000_0.2_10.py | 1 | 44650 | ['../evaluation_tmp.py', '10000']
mnist classes = 2
size: 10000
(2609,)
(7391,)
data size: 10000, nu: 0.2, gamma: 1
============ 1. Fold of CV ============
1) Incremental OCSVM
0 data points processed
1000 data points processed
2000 data points processed
3000 data points processed
4000 data points processed
5000 data points processed
6000 data points processed
None
Confusion matrix:
Prediction -1 1
Target
-1 2085 524
1 5915 1476
precision: 0.738, recall: 0.199702340685, f1-score: 0.314343520392
Number of support vectors: 8000
-----------
2) cvxopt-OCSVM
Confusion matrix:
Prediction 1
Target
-1 2609
1 7391
precision: 0.7391, recall: 1.0, f1-score: 0.849979874648
Number of support vectors: 8000
---------
3) sklearn-OCSVM
Confusion matrix:
Prediction -1 1
Target
-1 1677 932
1 4723 2668
Number of support vectors: 8000
precision: 0.741111111111, recall: 0.360979569747, f1-score: 0.485488126649
Wrote profile results to evaluation_tmp.py.lprof
Timer unit: 1e-06 s
Total time: 4446.27 s
File: ../ocsvm.py
Function: increment at line 97
Line # Hits Time Per Hit % Time Line Contents
==============================================================
97 @profile
98 def increment(self, Xc, init_ac=0, break_count=-1):
99
100 # epsilon
101 1 6 6.0 0.0 e = self._data._e
102 1 3 3.0 0.0 mu = 0
103 1 3 3.0 0.0 imin = None
104
105 # initialize existing X, coefficients a, C
106 1 7 7.0 0.0 X_origin = self._data.X()
107 1 6 6.0 0.0 K_X_origin = self._data.K_X()
108 1 6 6.0 0.0 n_data = X_origin.shape[0]
109 1 3 3.0 0.0 n_feature = X_origin.shape[1]
110
111 1 6 6.0 0.0 C = self._data.C()
112 1 6 6.0 0.0 a_origin = self._data.alpha()
113
114 # number of new incremental points
115 1 3 3.0 0.0 n_new = Xc.shape[0]
116
117 # number of all (new and existing) points
118 1 4 4.0 0.0 n_all = n_data + n_new
119
120 # concatenate all new points with all existing points
121 1 21 21.0 0.0 X = empty((n_new + n_data, n_feature))
122 1 31954 31954.0 0.0 X[0:n_new, :] = Xc
123 1 8589 8589.0 0.0 X[n_new:, :] = X_origin
124
125 # create kernel matrix for all new and existing points
126
127 # create of all data points
128 1 8 8.0 0.0 if K_X_origin == None:
129 1 10559662 10559662.0 0.2 K_X = self.gram(X)
130 else:
131 K_X = empty((n_all, n_all))
132 K_X[n_new:, n_new:] = K_X_origin
133 K_X_new = self.gram(Xc, X_origin)
134 K_X[0:n_new, :] = K_X_new
135 K_X[:, 0:n_new] = K_X_new.T
136
137 # creating coefficient vector alpha for all data points
138 1 28 28.0 0.0 a = empty(n_all)
139 1 19 19.0 0.0 a[n_new:] = a_origin
140 1 25 25.0 0.0 a[:n_new] = init_ac
141
142 # creating gradient vector
143 1 26 26.0 0.0 g = zeros(n_all)
144
145 # create sensitivity vector
146 1 7 7.0 0.0 gamma = empty(n_all)
147 1 4 4.0 0.0 check_gradient = False
148 # loop through all new points to increment
149 6316 34329 5.4 0.0 for x_count in range(n_new):
150 6315 34922 5.5 0.0 if x_count % 1000 == 0:
151 7 128 18.3 0.0 print "%s data points processed" % x_count
152 #print "--------- START %s ---------" % x_count
153
154 6315 25174 4.0 0.0 if x_count == break_count:
155 self._data.set_X(X)
156 self._data.set_alpha(a)
157 self._data.set_C(C)
158 self._data.set_K_X(K_X)
159 self.rho()
160 return False
161
162 # initialize X, a, C, g, indices, kernel values
163 6315 28553 4.5 0.0 start_origin = n_new - x_count
164 6315 23484 3.7 0.0 start_new = start_origin - 1
165
166 6315 22499 3.6 0.0 if x_count == 0:
167 1 4 4.0 0.0 inds = []
168 1 4 4.0 0.0 indr = []
169 1 3 3.0 0.0 inde = []
170 1 4 4.0 0.0 indo = []
171 1686 6578 3.9 0.0 for i in range(n_new, n_all):
172 1685 9710 5.8 0.0 if e < a[i] < C - e:
173 1685 7113 4.2 0.0 inds.append(i)
174 else:
175 indr.append(i)
176 if a[i] <= e:
177 indo.append(i)
178 else:
179 inde.append(i)
180
181 1 6 6.0 0.0 ls = len(inds) # support vectors length
182 1 4 4.0 0.0 lr = len(indr) # error and non-support vectors length
183 1 4 4.0 0.0 le = len(inde) # error vectors lenght
184 1 4 4.0 0.0 lo = len(indo)
185 #mu_old = mu
186 1 170 170.0 0.0 mu = - K_X[inds[0], :][start_origin:].dot(a[start_origin:])
187 1 4 4.0 0.0 if lr > 0:
188 g[indr] = K_X[indr, :][:, start_origin:].dot(a[start_origin:]) + mu
189 # calculate mu according to KKT-conditions
190
191
192 6315 930130 147.3 0.0 c_inds = [start_new] + inds
193
194 # kernel of support vectors
195 #Kss = K_X[:, inds][inds, :]
196 #print "difference indo: %s" % unique(round(K_X[indo, :][:, start_origin:].dot(a[start_origin:]) + mu - g[indo],6))
197 #check_gradient = True
198 #if check_gradient:
199 #g[indr] = K_X[indr, :][:, start_origin:].dot(a[start_origin:]) + mu
200 #g[indo] += K_X[indo[0], :][start_origin:].dot(a[start_origin:]) + mu - g[indo[0]]
201 #check_gradient = False
202 #print "difference indo: %s" % unique(round(K_X[indo, :][:, start_origin:].dot(a[start_origin:]) + mu - g[indo],6))
203 6315 28325 4.5 0.0 if ls > 0:
204 6315 2500668 396.0 0.1 gc = K_X[start_new, start_origin:].dot(a[start_origin:]) + mu
205
206 6315 31463 5.0 0.0 ac = a[start_new]
207
208 6315 23865 3.8 0.0 if x_count == 0:
209 1 5438 5438.0 0.0 Q = ones((ls+1, ls+1))
210 1 10 10.0 0.0 Q[0, 0] = 0
211 #Kss = self.gram(X[inds])
212 1686 7977 4.7 0.0 inds_row = [[i] for i in inds]
213 1 371349 371349.0 0.0 Q[1:, 1:] = K_X[inds_row, inds]
214 1 6 6.0 0.0 try:
215 1 1887830 1887830.0 0.0 R = inv(Q)
216 except np.linalg.linalg.LinAlgError:
217 x = 1e-11
218 found = False
219 print "singular matrix"
220 while not found:
221 try:
222 R = inv(Q + diag(ones(ls+1) * x))
223 found = True
224 except np.linalg.linalg.LinAlgError:
225 x = x*10
226 6315 24024 3.8 0.0 loop_count = 1
227 #print "gc: %s, ac: %s" % (gc, ac)
228 6315 96477 15.3 0.0 while gc < e and ac < C - e:
229 6315 24173 3.8 0.0 if ls == 0: check_gradient = True
230 #print "-------------------- incremental %s-%s ---------" % (x_count, loop_count)
231
232 6315 24031 3.8 0.0 if ls > 0:
233 6315 7278480 1152.6 0.2 n = K_X[start_new, :][c_inds]
234 6315 276420874 43772.1 6.2 beta = - R.dot(n)
235 6315 70854 11.2 0.0 betas = beta[1:]
236
237 # calculate gamma
238 6315 30666 4.9 0.0 if lr > 0 and ls > 0:
239 gamma_tmp = K_X[:, c_inds][start_new:]
240 gamma_tmp[:, 0] = 1
241 gamma[start_new:] = gamma_tmp.dot(beta) + K_X[start_new, :][start_new:]
242 gammac = gamma[start_new]
243
244 6315 26280 4.2 0.0 elif ls > 0:
245 # empty R set
246 6315 11314559 1791.7 0.3 gammac = K_X[start_new, :][c_inds].dot(beta) + 1
247
248 else:
249 # empty S set
250 gammac = 1
251 gamma[indr] = 1
252 #gamma[indo] = -1
253
254 # accounting
255 #case 1: Some alpha_i in S reaches a bound
256 6315 28753 4.6 0.0 if ls > 0:
257 6315 171869 27.2 0.0 IS_plus = betas > e
258 6315 115733 18.3 0.0 IS_minus = betas < - e
259 6315 297828 47.2 0.0 gsmax = ones(ls)*inf
260 #if np.isnan(np.min(gsmax)):
261 # gsmax = ones(ls)*inf
262 6315 6462461 1023.4 0.1 gsmax[IS_plus] = -a[inds][IS_plus] + C
263 6315 6554085 1037.9 0.1 gsmax[IS_minus] = - a[inds][IS_minus]
264 #gsmax[IS_plus] = -a[inds][IS_plus]
265 #gsmax[IS_plus] += C
266 #gsmax[IS_minus] = - a[inds][IS_minus]
267 6315 284358 45.0 0.0 gsmax = divide(gsmax, betas)
268 6315 4342393 687.6 0.1 gsmin = min(absolute(gsmax))
269 #print where(absolute(gsmax) == gsmin)
270 6315 459013 72.7 0.0 ismin = where(absolute(gsmax) == gsmin)[0][0]
271
272 else: gsmin = inf
273
274 #case 2: Some g_i in E reaches zero
275 6315 27912 4.4 0.0 if le > 0:
276
277 gamma_inde = gamma[inde]
278 g_inde = g[inde]
279 Ie_plus = gamma_inde > e
280
281 if len(g_inde[Ie_plus]) > 0:
282 gec = divide(-g_inde[Ie_plus], gamma_inde[Ie_plus])
283 gec[gec <= 0] = inf
284 gemin = min(gec)
285 if gemin < inf:
286 iemin = where(gec == gemin)[0][0]
287 else: gemin = inf
288 6315 27717 4.4 0.0 else: gemin = inf
289 #case 2: Some g_i in O reaches zero
290 6315 26844 4.3 0.0 if lo > 0 and ls > 0:
291 gamma_indo = gamma[indo]
292 g_indo = g[indo]
293 Io_minus = gamma_indo < - e
294 if len(g_indo[Io_minus]) > 0:
295 goc = divide(-g_indo[Io_minus], gamma_indo[Io_minus])
296 goc[goc <= 0] = inf
297 goc[g_indo[Io_minus] < 0] = inf
298 gomin = min(goc)
299 if gomin < inf:
300 iomin = where(goc == gomin)[0][0]
301 else: gomin = inf
302 6315 26472 4.2 0.0 else: gomin = inf
303
304 # case 3: gc becomes zero
305 6315 52912 8.4 0.0 if gammac > e: gcmin = - gc/gammac
306 else: gcmin = inf
307
308 # case 4
309 6315 39095 6.2 0.0 if ls > 0: gacmin = C - ac
310 else: gacmin = inf
311
312 # determine minimum largest increment
313 6315 37991 6.0 0.0 all_deltas = [gsmin, gemin, gomin, gcmin, gacmin]
314 6315 51044 8.1 0.0 gmin = min(all_deltas)
315 6315 151241 23.9 0.0 imin = where(all_deltas == gmin)[0][0]
316 # update a, g
317 6315 28142 4.5 0.0 if ls > 0:
318 6315 40268 6.4 0.0 mu += beta[0]*gmin
319 6315 29448 4.7 0.0 ac += gmin
320 6315 11957014 1893.4 0.3 a[inds] += betas*gmin
321 else:
322 mu += gmin
323 6315 31456 5.0 0.0 if lr > 0:
324 g[indr] += gamma[indr] * gmin
325 6315 35200 5.6 0.0 gc += gammac * gmin
326 6315 44916 7.1 0.0 if imin == 0: # min = gsmin => move k from s to r
327 # if there are more than 1 minimum, just take 1
328 ak = a[inds][ismin]
329
330 # delete the elements from X,a and g
331 # => add it to the end of X,a,g
332 ind_del = inds[ismin]
333 inds.remove(ind_del)
334 c_inds = [start_new] + inds
335 indr.append(ind_del)
336 if ak < e:
337 indo.append(ind_del)
338 lo += 1
339 else:
340 inde.append(ind_del)
341 le += 1
342
343 lr += 1
344 #decrement R, delete row ismin and column ismin
345
346 if ls > 2:
347 ismin += 1
348 R_new = zeros((ls,ls))
349 R_new[0:ismin, 0:ismin] = R[0:ismin, 0:ismin]
350 R_new[ismin:, 0:ismin] = R[ismin+1:,0:ismin]
351 R_new[0:ismin, ismin:] = R[0:ismin, ismin+1:]
352 R_new[ismin:, ismin:] = R[ismin+1:, ismin+1:]
353 betak = zeros(ls)
354 betak[:ismin] = R[ismin, :ismin]
355 betak[ismin:] = R[ismin, ismin+1:]
356 R_new -= outer(betak, betak)/R[ismin,ismin]
357 R = R_new
358 elif ls == 2:
359 R = ones((2, 2))
360 R[1,1] = 0
361 R[0,0] = -1
362 else:
363 R = inf
364 ls -= 1
365
366 6315 33341 5.3 0.0 elif imin == 1:
367 # delete the elements from X,a and g => add it to the end of X,a,g
368 ### old version find index to delete
369 #Ieplus_l = [i for i,b in enumerate(Ie_plus) if b]
370 #ind_del = inde[Ieplus_l[iemin]]
371 ### old version find index to delete
372 ind_del = np.asarray(inde)[Ie_plus][iemin]
373 if ls > 0:
374 nk = K_X[ind_del, :][[ind_del] + inds]
375 betak = - R.dot(nk)
376 betak1 = ones(ls + 2)
377 betak1[:-1] = betak
378 R_old = R
379 R = 1/k * outer(betak1, betak1)
380 R[:-1,:-1] += R_old
381 else:
382 R = ones((2, 2))
383 R[1,1] = 0
384 R[0,0] = -1
385 inds.append(ind_del)
386 c_inds = [start_new] + inds
387 indr.remove(ind_del)
388 inde.remove(ind_del)
389 ls += 1
390 lr -= 1
391 le -= 1
392
393 6315 32065 5.1 0.0 elif imin == 2: # min = gemin | gomin => move k from r to s
394
395 # delete the elements from X,a and g => add it to the end of X,a,g
396
397 ### old version find index to delete
398 #Io_minus_l = [i for i,b in enumerate(Io_minus) if b]
399 #ind_del = indo[Io_minus_l[iomin]]
400 ### old version find index to delete
401 ind_del = np.asarray(indo)[Io_minus][iomin]
402 if ls > 0:
403 nk = ones(ls+1)
404 nk[1:] = K_X[ind_del,:][inds]
405 betak = - R.dot(nk)
406 k = 1 - nk.dot(R).dot(nk)
407 betak1 = ones(ls+2)
408 betak1[:-1] = betak
409 R_old = R
410 R = 1/k * outer(betak1, betak1)
411 R[:-1,:-1] += R_old
412 else:
413 R = ones((2, 2))
414 R[1,1] = 0
415 R[0,0] = -1
416
417 indo.remove(ind_del)
418 indr.remove(ind_del)
419 inds.append(ind_del)
420 c_inds = [start_new] + inds
421 lo -= 1
422 lr -= 1
423 ls += 1
424 6315 32323 5.1 0.0 elif imin == 3:
425 '''
426 if ls > 0:
427 nk = ones(ls+1)
428 nk[1:] = K_X[start_new, :][inds]
429 betak = - R.dot(nk)
430 k = 1 - nk.dot(R).dot(nk)
431 betak1 = ones(ls + 2)
432 betak1[:-1] = betak
433 R_old = R
434 R = zeros((ls +2, ls +2))
435 R[:-1,:-1] = R_old
436 R += 1/k * outer(betak1, betak1)
437 else:
438 R = ones((2, 2))
439 R[1,1] = 0
440 R[0,0] = -1
441 '''
442 6315 28391 4.5 0.0 break
443 else:
444 break
445 loop_count += 1
446
447 6315 31180 4.9 0.0 a[start_new] = ac
448 6315 31385 5.0 0.0 g[start_new] = gc
449 6315 34513 5.5 0.0 if ac < e:
450 indr.append(start_new)
451 indo.append(start_new)
452 lr += 1
453 lo += 1
454 6315 39033 6.2 0.0 elif ac > C - e:
455 indr.append(start_new)
456 inde.append(start_new)
457 lr += 1
458 le += 1
459 else:
460 6315 45526 7.2 0.0 inds.append(start_new)
461 6315 29069 4.6 0.0 g[start_new] = 0
462 6315 37538 5.9 0.0 if len(inds) == 1:
463 R = ones((2, 2))
464 R[1,1] = 0
465 R[0,0] = -1
466 else:
467 6315 43113 6.8 0.0 if R.shape[0] != len(inds) + 1:
468 6315 127707 20.2 0.0 nk = ones(ls+1)
469 6315 7318330 1158.9 0.2 nk[1:] = K_X[start_new, :][inds[:-1]]
470 6315 276033663 43710.8 6.2 betak = - R.dot(nk)
471 6315 949917 150.4 0.0 k = 1 - nk.dot(R).dot(nk)
472 6315 221603 35.1 0.0 betak1 = ones(ls + 2)
473 6315 96065 15.2 0.0 betak1[:-1] = betak
474 6315 82876318 13123.7 1.9 R_old = R
475 6315 2616448189 414322.8 58.8 R = 1/k * outer(betak1, betak1)
476 6315 1114393414 176467.7 25.1 R[:-1,:-1] += R_old
477
478 6315 56172 8.9 0.0 ls += 1
479 # update X, a
480 1 27 27.0 0.0 self._data.set_X(X)
481 1 9 9.0 0.0 self._data.set_alpha(a)
482 1 9 9.0 0.0 self._data.set_C(C)
483 1 10 10.0 0.0 self._data.set_K_X(K_X)
484 1 4118987 4118987.0 0.1 print self.rho()
*** PROFILER RESULTS ***
incremental_ocsvm (../evaluation_tmp.py:185)
function called 1 times
186226 function calls in 4458.908 seconds
Ordered by: cumulative time, internal time, call count
List reduced from 149 to 40 due to restriction <40>
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 4458.908 4458.908 evaluation_tmp.py:185(incremental_ocsvm)
1 0.062 0.062 4448.712 4448.712 line_profiler.py:95(wrapper)
1 2499.493 2499.493 4448.650 4448.650 ocsvm.py:97(increment)
6315 1372.956 0.217 1373.067 0.217 numeric.py:740(outer)
37892 554.850 0.015 554.850 0.015 {method 'dot' of 'numpy.ndarray' objects}
2 0.000 0.000 11.064 5.532 ocsvm.py:58(gram)
2 0.000 0.000 11.064 5.532 pairwise.py:1164(pairwise_kernels)
2 0.000 0.000 11.064 5.532 pairwise.py:949(_parallel_pairwise)
2 2.008 1.004 11.064 5.532 pairwise.py:740(rbf_kernel)
1 0.013 0.013 10.196 10.196 ocsvm.py:35(fit)
1 0.386 0.386 10.183 10.183 ocsvm.py:62(alpha)
1 0.003 0.003 9.108 9.108 coneprog.py:4159(qp)
1 0.005 0.005 9.104 9.104 coneprog.py:1441(coneqp)
2 0.890 0.445 9.029 4.515 pairwise.py:136(euclidean_distances)
5 0.000 0.000 8.759 1.752 coneprog.py:1984(kktsolver)
5 0.120 0.024 8.759 1.752 misc.py:1389(factor)
2 0.000 0.000 8.112 4.056 extmath.py:171(safe_sparse_dot)
2 0.000 0.000 8.112 4.056 extmath.py:129(fast_dot)
2 7.718 3.859 8.112 4.056 extmath.py:97(_fast_dot)
5 6.097 1.219 6.097 1.219 {cvxopt.base.syrk}
12647 4.239 0.000 4.239 0.000 {min}
1 3.809 3.809 4.074 4.074 ocsvm.py:45(rho)
1 0.000 0.000 1.888 1.888 linalg.py:404(inv)
1 0.000 0.000 1.883 1.883 linalg.py:244(solve)
1 1.740 1.740 1.740 1.740 {numpy.linalg.lapack_lite.dgesv}
5 1.316 0.263 1.316 0.263 {cvxopt.base.gemm}
10 1.191 0.119 1.191 0.119 {cvxopt.lapack.potrf}
8 0.000 0.000 0.438 0.055 validation.py:268(check_array)
8 0.000 0.000 0.429 0.054 validation.py:43(_assert_all_finite)
8 0.428 0.054 0.428 0.054 {method 'sum' of 'numpy.ndarray' objects}
4 0.000 0.000 0.394 0.099 extmath.py:87(_impose_f_order)
18950 0.115 0.000 0.369 0.000 numeric.py:1791(ones)
56 0.255 0.005 0.255 0.005 {cvxopt.base.gemv}
9 0.000 0.000 0.222 0.025 misc.py:1489(solve)
12630 0.210 0.000 0.210 0.000 {numpy.core.multiarray.where}
8 0.000 0.000 0.199 0.025 coneprog.py:2333(f4)
8 0.000 0.000 0.198 0.025 coneprog.py:2291(f4_no_ir)
2 0.000 0.000 0.160 0.080 shape_base.py:177(vstack)
2 0.158 0.079 0.158 0.079 {numpy.core.multiarray.concatenate}
1 0.157 0.157 0.158 0.158 data.py:29(Xs)
*** PROFILER RESULTS ***
cvxopt_ocsvm (../evaluation_tmp.py:181)
function called 1 times
1399 function calls in 851.843 seconds
Ordered by: cumulative time, internal time, call count
List reduced from 123 to 40 due to restriction <40>
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 851.843 851.843 evaluation_tmp.py:181(cvxopt_ocsvm)
1 0.215 0.215 851.843 851.843 ocsvm.py:35(fit)
1 13.610 13.610 836.090 836.090 ocsvm.py:62(alpha)
1 0.085 0.085 805.091 805.091 coneprog.py:4159(qp)
1 0.009 0.009 805.006 805.006 coneprog.py:1441(coneqp)
5 0.000 0.000 797.632 159.526 coneprog.py:1984(kktsolver)
5 2.340 0.468 797.632 159.526 misc.py:1389(factor)
5 630.443 126.089 630.443 126.089 {cvxopt.base.syrk}
10 110.158 11.016 110.158 11.016 {cvxopt.lapack.potrf}
5 53.899 10.780 53.899 10.780 {cvxopt.base.gemm}
2 0.000 0.000 25.810 12.905 ocsvm.py:58(gram)
2 0.000 0.000 25.810 12.905 pairwise.py:1164(pairwise_kernels)
2 0.012 0.006 25.810 12.905 pairwise.py:949(_parallel_pairwise)
2 3.824 1.912 25.798 12.899 pairwise.py:740(rbf_kernel)
2 1.760 0.880 21.800 10.900 pairwise.py:136(euclidean_distances)
2 0.000 0.000 19.970 9.985 extmath.py:171(safe_sparse_dot)
2 0.000 0.000 19.970 9.985 extmath.py:129(fast_dot)
2 19.296 9.648 19.970 9.985 extmath.py:97(_fast_dot)
1 0.000 0.000 15.538 15.538 ocsvm.py:45(rho)
2 0.000 0.000 5.915 2.957 shape_base.py:177(vstack)
2 5.914 2.957 5.914 2.957 {numpy.core.multiarray.concatenate}
56 5.881 0.105 5.881 0.105 {cvxopt.base.gemv}
9 0.001 0.000 4.780 0.531 misc.py:1489(solve)
8 0.000 0.000 4.241 0.530 coneprog.py:2333(f4)
8 0.000 0.000 4.241 0.530 coneprog.py:2291(f4_no_ir)
10 0.000 0.000 2.122 0.212 coneprog.py:1900(fG)
10 0.000 0.000 2.122 0.212 misc.py:801(sgemv)
18 1.019 0.057 1.019 0.057 {cvxopt.blas.trsv}
10 0.000 0.000 0.893 0.089 validation.py:268(check_array)
2 0.001 0.001 0.841 0.420 twodim_base.py:220(diag)
4 0.840 0.210 0.840 0.210 {numpy.core.multiarray.zeros}
5 0.780 0.156 0.780 0.156 {cvxopt.blas.trsm}
10 0.000 0.000 0.763 0.076 validation.py:43(_assert_all_finite)
10 0.762 0.076 0.762 0.076 {method 'sum' of 'numpy.ndarray' objects}
4 0.000 0.000 0.674 0.168 extmath.py:87(_impose_f_order)
5 0.000 0.000 0.432 0.086 coneprog.py:1847(fP)
5 0.432 0.086 0.432 0.086 {cvxopt.base.symv}
2 0.256 0.128 0.257 0.129 data.py:29(Xs)
4 0.000 0.000 0.219 0.055 pairwise.py:57(check_pairwise_arrays)
39 0.130 0.003 0.130 0.003 {numpy.core.multiarray.array}
*** PROFILER RESULTS ***
sklearn_ocsvm (../evaluation_tmp.py:177)
function called 1 times
61 function calls in 437.500 seconds
Ordered by: cumulative time, internal time, call count
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 437.500 437.500 evaluation_tmp.py:177(sklearn_ocsvm)
1 0.004 0.004 437.500 437.500 classes.py:941(fit)
1 0.000 0.000 437.496 437.496 base.py:99(fit)
1 0.000 0.000 437.436 437.436 base.py:211(_dense_fit)
1 437.436 437.436 437.436 437.436 {sklearn.svm.libsvm.fit}
1 0.000 0.000 0.059 0.059 validation.py:268(check_array)
5 0.044 0.009 0.044 0.009 {numpy.core.multiarray.array}
1 0.000 0.000 0.015 0.015 validation.py:43(_assert_all_finite)
1 0.015 0.015 0.015 0.015 {method 'sum' of 'numpy.ndarray' objects}
1 0.000 0.000 0.000 0.000 base.py:193(_validate_targets)
1 0.000 0.000 0.000 0.000 validation.py:126(_shape_repr)
2 0.000 0.000 0.000 0.000 numeric.py:167(asarray)
1 0.000 0.000 0.000 0.000 numeric.py:1791(ones)
1 0.000 0.000 0.000 0.000 {method 'join' of 'str' objects}
2 0.000 0.000 0.000 0.000 base.py:553(isspmatrix)
1 0.000 0.000 0.000 0.000 {method 'fill' of 'numpy.ndarray' objects}
2 0.000 0.000 0.000 0.000 sputils.py:116(_isinstance)
2 0.000 0.000 0.000 0.000 numeric.py:237(asanyarray)
3 0.000 0.000 0.000 0.000 validation.py:153(<genexpr>)
1 0.000 0.000 0.000 0.000 getlimits.py:234(__init__)
2 0.000 0.000 0.000 0.000 {numpy.core.multiarray.empty}
1 0.000 0.000 0.000 0.000 validation.py:105(_num_samples)
1 0.000 0.000 0.000 0.000 {sklearn.svm.libsvm.set_verbosity_wrap}
1 0.000 0.000 0.000 0.000 shape_base.py:58(atleast_2d)
1 0.000 0.000 0.000 0.000 {method 'copy' of 'numpy.ndarray' objects}
1 0.000 0.000 0.000 0.000 validation.py:503(check_random_state)
3 0.000 0.000 0.000 0.000 {hasattr}
1 0.000 0.000 0.000 0.000 getlimits.py:259(max)
1 0.000 0.000 0.000 0.000 base.py:203(_warn_from_fit_status)
1 0.000 0.000 0.000 0.000 {method 'randint' of 'mtrand.RandomState' objects}
1 0.000 0.000 0.000 0.000 {method 'index' of 'list' objects}
6 0.000 0.000 0.000 0.000 {len}
4 0.000 0.000 0.000 0.000 {method 'split' of 'str' objects}
3 0.000 0.000 0.000 0.000 {isinstance}
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
2 0.000 0.000 0.000 0.000 {callable}
1 0.000 0.000 0.000 0.000 {method 'append' of 'list' objects}
0 0.000 0.000 profile:0(profiler)
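A hedged, illustrative sketch (not part of the profiled code above): the two
hottest lines in the report, `R = 1/k * outer(betak1, betak1)` (58.8% of time)
and `R[:-1,:-1] += R_old` (25.1%), can be expressed with numpy's out= argument
so the rank-1 term is written straight into the new array instead of through a
temporary. Variable names mirror the listing, but the helper itself is
invented for illustration only.

import numpy as np

def grow_R_inplace(R_old, betak1, k):
    n = betak1.shape[0]
    R = np.empty((n, n))
    np.outer(betak1, betak1, out=R)  # rank-1 update written directly into R
    R /= k
    R[:-1, :-1] += R_old             # same recursion as in the profiled code
    return R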
| gpl-2.0 |
liupfskygre/qiime | scripts/make_otu_heatmap.py | 15 | 11322 | #!/usr/bin/env python
from __future__ import division
__author__ = "Dan Knights"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = [
"Dan Knights",
"Jose Carlos Clemente Litran",
"Yoshiki Vazquez Baeza",
"Greg Caporaso",
"Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Dan Knights"
__email__ = "[email protected]"
import numpy as np
from biom import load_table
from qiime.make_otu_heatmap import (
plot_heatmap, get_clusters, make_otu_labels, extract_metadata_column,
get_order_from_categories, get_order_from_tree, names_to_indices,
get_log_transform, get_overlapping_samples)
from qiime.util import (parse_command_line_parameters, get_options_lookup,
make_option, MissingFileError)
from qiime.parse import parse_mapping_file
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = """Plot heatmap of OTU table"""
script_info['script_description'] = (
"This script visualizes an OTU table as a heatmap where each row "
"corresponds to an OTU and each column corresponds to a sample. The "
"higher the relative abundance of an OTU in a sample, the more intense "
"the color at the corresponsing position in the heatmap. By default, the "
"OTUs (rows) will be clustered by UPGMA hierarchical clustering, and the "
"samples (columns) will be presented in the order in which they appear in "
"the OTU table. Alternatively, the user may supply a tree to sort the "
"OTUs (rows) or samples (columns), or both. The user may also pass in a "
"mapping file for sorting samples. If the user passes in a mapping file "
"and a metadata category, samples (columns) will be grouped by category "
"value and subsequently clustered within each group.")
script_info['script_usage'] = []
script_info['script_usage'].append(
("",
"Generate a heatmap as a PDF using all default values:",
"%prog -i otu_table.biom -o heatmap.pdf"))
script_info['script_usage'].append(
("",
"Generate a heatmap as a PNG:",
"%prog -i otu_table.biom -o heatmap.png -g png"))
script_info['script_usage'].append(
("",
"Sort the heatmap columns (samples) by the order of samples in the "
"mapping file",
"%prog -i otu_table.biom -o heatmap_sorted_samples.pdf -m "
"mapping_file.txt"))
script_info['script_usage'].append(
("",
"Sort the heatmap columns (samples) by the order of samples in the "
"mapping file, and sort the heatmap rows by the order of tips in the "
"tree:",
"%prog -i otu_table.biom -o heatmap_sorted.pdf -m mapping_file.txt -t "
"rep_set.tre"))
script_info['script_usage'].append(
("",
"Group the heatmap columns (samples) by metadata category (e.g., "
"Treatment), then cluster within each group:""",
"%prog -i otu_table.biom -o heatmap_grouped_by_Treatment.pdf -m "
"mapping_file.txt -c Treatment"))
script_info['output_description'] = (
"A single output file is created containing the heatmap of the OTU "
"table (a PDF file by default).")
script_info['required_options'] = [
options_lookup['otu_table_as_primary_input'],
options_lookup['output_fp']
]
script_info['optional_options'] = [
make_option('-t', '--otu_tree', type='existing_filepath', help='Tree file '
'to be used for sorting OTUs in the heatmap', default=None),
make_option('-m', '--map_fname', dest='map_fname',
type='existing_filepath', help='Metadata mapping file to be '
'used for sorting Samples in the heatmap.', default=None),
make_option('-c', '--category', dest='category', type="string",
help='Metadata category for sorting samples. Samples will be '
'clustered within each category level using euclidean UPGMA.',
default=None),
make_option('-s', '--sample_tree', dest='sample_tree',
type='existing_filepath', help='Tree file to be used for '
'sorting samples (e.g, output from upgma_cluster.py). If both '
'this and the sample mapping file are provided, the mapping '
'file is ignored.', default=None),
make_option('-g', '--imagetype',
help='type of image to produce (i.e. png, svg, pdf) '
'[default: %default]', default='pdf', type="choice",
choices=['pdf', 'png', 'svg']),
make_option('--no_log_transform', action="store_true",
help='Data will not be log-transformed. Without this option, '
'all zeros will be set to a small value (default is 1/2 the '
'smallest non-zero entry). Data will be translated to be '
'non-negative after log transform, and num_otu_hits will be '
'set to 0.', default=False),
make_option('--suppress_row_clustering', action="store_true",
help='No UPGMA clustering of OTUs (rows) is performed. If '
'--otu_tree is provided, this flag is ignored.',
default=False),
make_option('--suppress_column_clustering', action="store_true",
help='No UPGMA clustering of Samples (columns) is performed. '
'If --map_fname is provided, this flag is ignored.',
default=False),
make_option('--absolute_abundance', action="store_true",
help='Do not normalize samples to sum to 1 [default: %default]',
default=False),
make_option('--color_scheme', default="YlGn",
help="color scheme for figure. see "
"http://matplotlib.org/examples/color/"
"colormaps_reference.html for choices "
"[default: %default]"),
make_option('--width',
help='width of the figure in inches [default: %default]',
default=5, type='float'),
make_option('--height',
help='height of the figure in inches [default: %default]',
default=5, type='float'),
make_option('--dpi',
help='resolution of the figure in dots per inch '
'[default: value of savefig.dpi in matplotlibrc file]',
type='int', default=None),
make_option('--obs_md_category', default="taxonomy",
help="observation metadata category to plot "
"[default: %default]"),
make_option('--obs_md_level', default=None, type="int",
help="the level of observation metadata to plot for "
"hierarchical metadata [default: lowest level]")
]
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
otu_table = load_table(opts.otu_table_fp)
obs_md_category = opts.obs_md_category
obs_md_level = opts.obs_md_level
if obs_md_level is None:
# grab the last level if the user didn't specify a level
obs_md_level = -1
else:
# convert to 0-based indexing
obs_md_level -= 1
obs_md = otu_table.metadata(axis='observation')
obs_md_labels = []
if (obs_md is None or obs_md_category not in obs_md[0]):
obs_md_labels = [['']] * len(otu_table.ids(axis='observation'))
else:
for _, _, md in otu_table.iter(axis='observation'):
current_md = md[obs_md_category]
if obs_md_level < len(current_md):
current_md_at_level = current_md[obs_md_level]
else:
current_md_at_level = ''
obs_md_labels.append([current_md_at_level])
otu_labels = make_otu_labels(otu_table.ids(axis='observation'),
obs_md_labels)
# Convert to relative abundance if requested
if not opts.absolute_abundance:
otu_table = otu_table.norm(axis='observation')
# Get log transform if requested
if not opts.no_log_transform:
otu_table = get_log_transform(otu_table)
# Re-order samples by tree if provided
if opts.sample_tree is not None:
sample_order = get_order_from_tree(otu_table.ids(),
open(opts.sample_tree, 'U'))
# if there's no sample tree, sort samples by mapping file
elif opts.map_fname is not None:
lines = open(opts.map_fname, 'U').readlines()
metadata = list(parse_mapping_file(lines))
new_map, otu_table = get_overlapping_samples(metadata[0], otu_table)
metadata[0] = new_map
map_sample_ids = zip(*metadata[0])[0]
# if there's a category, do clustering within each category
if opts.category is not None:
category_labels = extract_metadata_column(otu_table.ids(),
metadata, opts.category)
sample_order = get_order_from_categories(otu_table,
category_labels)
# else: just use the mapping file order
else:
ordered_sample_ids = []
for sample_id in map_sample_ids:
if otu_table.exists(sample_id):
ordered_sample_ids.append(sample_id)
sample_order = names_to_indices(
otu_table.ids(),
ordered_sample_ids)
# if no tree or mapping file, perform upgma euclidean
elif not opts.suppress_column_clustering:
data = np.asarray([i for i in otu_table.iter_data(axis='observation')])
sample_order = get_clusters(data, axis='column')
# else just use OTU table ordering
else:
sample_order = np.arange(len(otu_table.ids()))
# re-order OTUs by tree (if provided), or clustering
if opts.otu_tree is not None:
# open tree file
try:
f = open(opts.otu_tree, 'U')
except (TypeError, IOError):
raise MissingFileError("Couldn't read tree file at path: %s" %
opts.otu_tree)
otu_order = get_order_from_tree(otu_table.ids(axis='observation'), f)
f.close()
# if no tree or mapping file, perform upgma euclidean
elif not opts.suppress_row_clustering:
data = np.asarray([i for i in otu_table.iter_data(axis='observation')])
otu_order = get_clusters(data, axis='row')
# else just use OTU table ordering
else:
otu_order = np.arange(len(otu_table.ids(axis='observation')))
# otu_order and sample_order should be ids, rather than indices
# to use in sortObservationOrder/sortSampleOrder
otu_id_order = [otu_table.ids(axis='observation')[i] for i in otu_order]
sample_id_order = [otu_table.ids()[i] for i in sample_order]
# Re-order otu table, sampleids, etc. as necessary
otu_table = otu_table.sort_order(otu_id_order, axis='observation')
otu_labels = np.array(otu_labels)[otu_order]
otu_table = otu_table.sort_order(sample_id_order)
sample_labels = otu_table.ids()
plot_heatmap(otu_table, otu_labels, sample_labels, opts.output_fp,
imagetype=opts.imagetype, width=opts.width,
height=opts.height, dpi=opts.dpi,
color_scheme=opts.color_scheme)
if __name__ == "__main__":
main()
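# --- Illustrative addition (not part of the original script) ---------------
# A hedged, minimal sketch of the ordering idea described in the script
# description above: UPGMA (average-linkage) clustering of a small abundance
# matrix with SciPy, returning the row and column leaf orders. This is only an
# analogy for qiime.make_otu_heatmap.get_clusters, not its actual
# implementation; the matrix values are invented.
def _example_upgma_order():
    from scipy.cluster.hierarchy import linkage, leaves_list
    data = np.array([[0.0, 3.0, 1.0],
                     [2.0, 0.0, 0.0],
                     [0.0, 2.0, 1.0]])
    row_order = leaves_list(linkage(data, method='average', metric='euclidean'))
    col_order = leaves_list(linkage(data.T, method='average', metric='euclidean'))
    return row_order, col_order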
| gpl-2.0 |
jseabold/statsmodels | statsmodels/sandbox/nonparametric/dgp_examples.py | 4 | 5992 | # -*- coding: utf-8 -*-
"""Examples of non-linear functions for non-parametric regression
Created on Sat Jan 05 20:21:22 2013
Author: Josef Perktold
"""
import numpy as np
## Functions
def fg1(x):
'''Fan and Gijbels example function 1
'''
return x + 2 * np.exp(-16 * x**2)
def fg1eu(x):
'''Eubank similar to Fan and Gijbels example function 1
'''
return x + 0.5 * np.exp(-50 * (x - 0.5)**2)
def fg2(x):
'''Fan and Gijbels example function 2
'''
return np.sin(2 * x) + 2 * np.exp(-16 * x**2)
def func1(x):
'''made up example with sin, square
'''
return np.sin(x * 5) / x + 2. * x - 1. * x**2
## Classes with Data Generating Processes
doc = {'description':
'''Base Class for Univariate non-linear example
Does not work on its own.
Needs at least self.func to be defined by a subclass.
''',
'ref': ''}
class _UnivariateFunction(object):
    # Base Class for Univariate non-linear example.
    # Does not work on its own; needs at least self.func defined by a subclass.
__doc__ = '''%(description)s
Parameters
----------
nobs : int
number of observations to simulate
x : None or 1d array
If x is given then it is used for the exogenous variable instead of
creating a random sample
distr_x : None or distribution instance
Only used if x is None. The rvs method is used to create a random
sample of the exogenous (explanatory) variable.
distr_noise : None or distribution instance
The rvs method is used to create a random sample of the errors.
Attributes
----------
x : ndarray, 1-D
exogenous or explanatory variable. x is sorted.
y : ndarray, 1-D
endogenous or response variable
y_true : ndarray, 1-D
expected values of endogenous or response variable, i.e. values of y
without noise
func : callable
underlying function (defined by subclass)
%(ref)s
''' #% doc
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
if x is None:
if distr_x is None:
x = np.random.normal(loc=0, scale=self.s_x, size=nobs)
else:
x = distr_x.rvs(size=nobs)
x.sort()
self.x = x
if distr_noise is None:
noise = np.random.normal(loc=0, scale=self.s_noise, size=nobs)
else:
noise = distr_noise.rvs(size=nobs)
if hasattr(self, 'het_scale'):
noise *= self.het_scale(self.x)
#self.func = fg1
self.y_true = y_true = self.func(x)
self.y = y_true + noise
def plot(self, scatter=True, ax=None):
'''plot the mean function and optionally the scatter of the sample
Parameters
----------
scatter : bool
If true, then add scatterpoints of sample to plot.
ax : None or matplotlib axis instance
If None, then a matplotlib.pyplot figure is created, otherwise
the given axis, ax, is used.
Returns
-------
Figure
This is either the created figure instance or the one associated
with ax if ax is given.
'''
if ax is None:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
if scatter:
ax.plot(self.x, self.y, 'o', alpha=0.5)
xx = np.linspace(self.x.min(), self.x.max(), 100)
ax.plot(xx, self.func(xx), lw=2, color='b', label='dgp mean')
return ax.figure
doc = {'description':
'''Fan and Gijbels example function 1
linear trend plus a hump
''',
'ref':
'''
References
----------
Fan, Jianqing, and Irene Gijbels. 1992. "Variable Bandwidth and Local
Linear Regression Smoothers."
The Annals of Statistics 20 (4) (December): 2008-2036. doi:10.2307/2242378.
'''}
class UnivariateFanGijbels1(_UnivariateFunction):
__doc__ = _UnivariateFunction.__doc__ % doc
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
self.s_x = 1.
self.s_noise = 0.7
self.func = fg1
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
doc['description'] =\
'''Fan and Gijbels example function 2
sin plus a hump
'''
class UnivariateFanGijbels2(_UnivariateFunction):
__doc__ = _UnivariateFunction.__doc__ % doc
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
self.s_x = 1.
self.s_noise = 0.5
self.func = fg2
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
class UnivariateFanGijbels1EU(_UnivariateFunction):
'''
Eubank p.179f
'''
def __init__(self, nobs=50, x=None, distr_x=None, distr_noise=None):
if distr_x is None:
from scipy import stats
distr_x = stats.uniform
self.s_noise = 0.15
self.func = fg1eu
super(self.__class__, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
class UnivariateFunc1(_UnivariateFunction):
'''
made up, with sin and quadratic trend
'''
def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
if x is None and distr_x is None:
from scipy import stats
distr_x = stats.uniform(-2, 4)
else:
nobs = x.shape[0]
self.s_noise = 2.
self.func = func1
super(UnivariateFunc1, self).__init__(nobs=nobs, x=x,
distr_x=distr_x,
distr_noise=distr_noise)
def het_scale(self, x):
return np.sqrt(np.abs(3+x))
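# --- Illustrative addition (not part of the original module) ---------------
# A hedged usage sketch of the classes above: simulate one of the Fan-Gijbels
# designs and plot the sample together with the true mean function. The seed
# is arbitrary.
def _example_dgp_usage(seed=12345):
    np.random.seed(seed)
    dgp = UnivariateFanGijbels1(nobs=200)
    fig = dgp.plot(scatter=True)  # scatter of (x, y) plus the dgp mean curve
    return dgp.x, dgp.y, dgp.y_true, fig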
| bsd-3-clause |
tuqc/lyman | scripts/run_group.py | 1 | 8643 | #! /usr/bin/env python
"""
Group fMRI analysis frontend for Lyman ecosystem.
"""
import os
import sys
import time
import shutil
import os.path as op
from textwrap import dedent
import argparse
import matplotlib as mpl
mpl.use("Agg")
import nipype
from nipype import Node, MapNode, SelectFiles, DataSink, IdentityInterface
import lyman
import lyman.workflows as wf
from lyman import tools
def main(arglist):
"""Main function for workflow setup and execution."""
args = parse_args(arglist)
# Get and process specific information
project = lyman.gather_project_info()
exp = lyman.gather_experiment_info(args.experiment, args.altmodel)
if args.experiment is None:
args.experiment = project["default_exp"]
if args.altmodel:
exp_name = "-".join([args.experiment, args.altmodel])
else:
exp_name = args.experiment
# Make sure some paths are set properly
os.environ["SUBJECTS_DIR"] = project["data_dir"]
# Set roots of output storage
anal_dir_base = op.join(project["analysis_dir"], exp_name)
work_dir_base = op.join(project["working_dir"], exp_name)
nipype.config.set("execution", "crashdump_dir", project["crash_dir"])
# Subject source (no iterables here)
subject_list = lyman.determine_subjects(args.subjects)
subj_source = Node(IdentityInterface(fields=["subject_id"]),
name="subj_source")
subj_source.inputs.subject_id = subject_list
# Set up the regressors and contrasts
regressors = dict(group_mean=[1] * len(subject_list))
contrasts = [["group_mean", "T", ["group_mean"], [1]]]
# Subject level contrast source
contrast_source = Node(IdentityInterface(fields=["l1_contrast"]),
iterables=("l1_contrast", exp["contrast_names"]),
name="contrast_source")
# Group workflow
space = args.regspace
wf_name = "_".join([space, args.output])
if space == "mni":
mfx, mfx_input, mfx_output = wf.create_volume_mixedfx_workflow(
wf_name, subject_list, regressors, contrasts, exp)
else:
mfx, mfx_input, mfx_output = wf.create_surface_ols_workflow(
wf_name, subject_list, exp)
# Mixed effects inputs
ffxspace = "mni" if space == "mni" else "epi"
ffxsmooth = "unsmoothed" if args.unsmoothed else "smoothed"
mfx_base = op.join("{subject_id}/ffx/%s/%s/{l1_contrast}" % (ffxspace,
ffxsmooth))
templates = dict(copes=op.join(mfx_base, "cope1.nii.gz"))
if space == "mni":
templates.update(dict(
varcopes=op.join(mfx_base, "varcope1.nii.gz"),
dofs=op.join(mfx_base, "tdof_t1.nii.gz")))
else:
templates.update(dict(
reg_file=op.join(anal_dir_base, "{subject_id}/preproc/run_1",
"func2anat_tkreg.dat")))
# Workflow source node
mfx_source = MapNode(SelectFiles(templates,
base_directory=anal_dir_base,
sort_filelist=True),
"subject_id",
"mfx_source")
# Workflow input connections
mfx.connect([
(contrast_source, mfx_source,
[("l1_contrast", "l1_contrast")]),
(contrast_source, mfx_input,
[("l1_contrast", "l1_contrast")]),
(subj_source, mfx_source,
[("subject_id", "subject_id")]),
(mfx_source, mfx_input,
[("copes", "copes")])
        ])
if space == "mni":
mfx.connect([
(mfx_source, mfx_input,
[("varcopes", "varcopes"),
("dofs", "dofs")]),
])
else:
mfx.connect([
(mfx_source, mfx_input,
[("reg_file", "reg_file")]),
(subj_source, mfx_input,
[("subject_id", "subject_id")])
])
# Mixed effects outputs
mfx_sink = Node(DataSink(base_directory="/".join([anal_dir_base,
args.output,
space]),
substitutions=[("/stats", "/"),
("/_hemi_", "/"),
("_glm_results", "")],
parameterization=True),
name="mfx_sink")
mfx_outwrap = tools.OutputWrapper(mfx, subj_source,
mfx_sink, mfx_output)
mfx_outwrap.sink_outputs()
mfx_outwrap.set_mapnode_substitutions(1)
mfx_outwrap.add_regexp_substitutions([
(r"_l1_contrast_[-\w]*/", "/"),
(r"_mni_hemi_[lr]h", "")
])
mfx.connect(contrast_source, "l1_contrast",
mfx_sink, "container")
# Set a few last things
mfx.base_dir = work_dir_base
# Execute
lyman.run_workflow(mfx, args=args)
# Clean up
if project["rm_working_dir"]:
shutil.rmtree(project["working_dir"])
def parse_args(arglist):
"""Take an arglist and return an argparse Namespace."""
help = dedent("""
Perform a basic group analysis in lyman.
This script currently only handles one-sample group mean tests on each of
the fixed-effects contrasts. It is possible to run the group model in the
volume or on the surface, although the actual model changes depending on
this choice.
The volume model runs FSL's FLAME mixed effects for hierarchical inference,
which uses the lower-level variance estimates, and it applies standard
GRF-based correction for multiple comparisons. The details of the
model-fitting procedure are set in the experiment file, along with the
thresholds used for correction.
The surface model uses a standard ordinary least squares fit and does
correction with an approach based on a Monte Carlo simulation of the null
distribution of cluster sizes for smoothed Gaussian data. Fortunately, the
simulations are cached so this runs very quickly. Unfortunately, the cached
simulations used a whole-brain search space, so this will be overly
conservative for partial-brain acquisitions.
Because of how GRF-based correction works, the thresholded volume images
only have positive voxels. It is up to you to define "negative" versions of
any contrasts where you are interested in relative deactivation. The
surface correction does not have this constraint, and the test sign is
configurable in the experiment file (and will thus apply to all contrasts).
By default the results are written under `group` next to the subject level
data in the lyman analysis directory, although the output directory name
can be changed.
Examples
--------
Note that the parameter switches match any unique short version
of the full parameter name.
run_group.py
With no arguments, this will process the default experiment with the
subjects defined in $LYMAN_DIR/subjects.txt in the MNI space using the
MultiProc plugin with 4 processes.
run_group.py -s pilot_subjects -r fsaverage -o pilot -unsmoothed
This will processes the subjects defined in a file at
$LYMAN_DIR/pilot_subjects.txt as above but with the surface workflow.
Unsmoothed fixed effects parameter estimates will be sampled to the
surface and smoothed there. The resulting files will be stored under
<analysis_dir>/<experiment>/pilot/fsaverage/<contrast>/<hemi>
run_group.py -e nback -a parametric -p sge -q batch.q
This will process an alternate model for the `nback` experiment using
the SGE plugin by submitting jobs to the batch.q queue.
Usage Details
-------------
""")
parser = tools.parser
parser.description = help
parser.formatter_class = argparse.RawDescriptionHelpFormatter
parser.add_argument("-experiment", help="experimental paradigm")
parser.add_argument("-altmodel", help="alternate model to fit")
parser.add_argument("-regspace", default="mni",
choices=["mni", "fsaverage"],
help="common space for group analysis")
parser.add_argument("-unsmoothed", action="store_true",
help="used unsmoothed fixed effects outputs")
parser.add_argument("-output", default="group",
help="output directory name")
return parser.parse_args(arglist)
if __name__ == "__main__":
main(sys.argv[1:])
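# --- Illustrative addition (not part of the original script) ---------------
# A hedged sketch of the one-sample "group mean" design that main() builds:
# a single regressor of ones across subjects and one T contrast on it. The
# subject IDs are made up.
def _example_group_mean_design():
    subjects = ["subj01", "subj02", "subj03"]
    regressors = dict(group_mean=[1] * len(subjects))
    contrasts = [["group_mean", "T", ["group_mean"], [1]]]
    return regressors, contrasts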
| bsd-3-clause |
MDBrothers/FluidMechanicsToys | ideal_flow_2D/postprocessing.py | 1 | 1127 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import sys
pressure = sys.argv[1]
def load(fname):
''' load file using std open '''
f = open(fname, 'r')
data = []
for line in f.readlines():
        data.append(line.replace(',', ' ').split())  # split() drops empty tokens and trailing newlines
f.close()
return np.array(np.concatenate(data), dtype=np.float64)
def main():
#output files will be loaded into 1-D arrays left to right, top to bottom
#grid_shape_data = np.loadtxt(boundary_conditions_filename, delimiter = ' ,', unpack = True)
pressure_values = load(pressure);
print pressure_values
print " "
print pressure_values[0:pressure_values.size:3].size
print pressure_values[1:pressure_values.size:3].size
print pressure_values[2:pressure_values.size:3].size
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(pressure_values[0:66:3], pressure_values[1:66:3], pressure_values[2:66:3], label='parametric curve')
ax.legend()
plt.show()
if __name__ == "__main__":
main()
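# --- Illustrative addition (not part of the original script) ---------------
# A hedged sketch of an input file that load() above can parse: one node per
# line with space-separated x, y, pressure values. The file name and numbers
# are made up.
def _example_write_and_load(fname='dummy_pressure.txt'):
    rows = ["0.0 0.0 1.5", "1.0 0.0 1.2", "0.0 1.0 0.9"]
    with open(fname, 'w') as f:
        f.write("\n".join(rows) + "\n")
    return load(fname)  # flat float64 array: [0.0, 0.0, 1.5, 1.0, ...]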
| mit |
cg31/tensorflow | tensorflow/examples/learn/text_classification.py | 6 | 4438 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def bag_of_words_model(x, y):
"""A bag-of-words model. Note it disregards the word order in the text."""
target = tf.one_hot(y, 15, 1, 0)
word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
features = tf.reduce_max(word_vectors, reduction_indices=1)
prediction, loss = learn.models.logistic_regression(features, target)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
def rnn_model(x, y):
"""Recurrent neural network model to predict from sequence of words
to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
# Split into list of embedding per word, while removing doc length dim.
# word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
word_list = tf.unpack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.nn.rnn_cell.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.nn.rnn(cell, word_list, dtype=tf.float32)
# Given encoding of RNN, take encoding of last step (e.g hidden size of the
# neural network of last step) and pass it as features for logistic
# regression over output classes.
target = tf.one_hot(y, 15, 1, 0)
prediction, loss = learn.models.logistic_regression(encoding, target)
# Create a training op.
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(),
optimizer='Adam', learning_rate=0.01)
return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = learn.Estimator(model_fn=bag_of_words_model)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(x_test, as_iterable=True)]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true'
)
FLAGS = parser.parse_args()
tf.app.run()
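# --- Illustrative addition (not part of the original example) ---------------
# A hedged sketch: main() above fits the bag-of-words model; to try the RNN
# model that is also defined in this file, only the Estimator construction
# changes (the fit/predict calls stay the same).
def _example_build_rnn_classifier():
  return learn.Estimator(model_fn=rnn_model)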
| apache-2.0 |
roshchupkin/VBM | scripts/tools/summary.py | 1 | 3768 | import nipy
import pandas as pd
import numpy as np
import argparse
import os
from collections import OrderedDict
import sys
sys.path.append( os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) )
from config import *
parser = argparse.ArgumentParser(description='Script to create summary of VBM experiment.'
'Summary table:'
'Brain Region,Region size (#voxels),min p-value,# negative voxels,# positive voxels ')
parser.add_argument("-p",required=True, type=str, help="p-map image (with real p-values, not (1-p_value)!)")
parser.add_argument("-t",required=True, type=str, help="t-map or b-map image")
parser.add_argument("-a", type=str,default='Hammer',choices=['Hammer','FreeSurfer','Tracts'], help="Atlas name")
parser.add_argument("-o",required=True, type=str, help="output folder")
parser.add_argument("-n",required=True, type=str, help="result table name")
parser.add_argument("-th",required=True, type=float, help="p-value threshold")
parser.add_argument("-mask",default=None, type=str, help="path mask image")
parser.add_argument("-atlas",default=None, type=str, help="path atlas image")
parser.add_argument("-tract_th", type=float, default=0.0, help='tracts threshold for minimum probability to include voxel')
args = parser.parse_args()
print args
def get_atlas(atlas_name,atlas_path):
Atlas={}
Atlas['mask']={}
Atlas['regions']={}
if atlas_path is None:
Atlas_path=ATLAS[atlas_name]
else:
Atlas_path = atlas_path
Atlas_table=INFO_TABLE[atlas_name]
A=nipy.load_image(Atlas_path)
Table=pd.read_csv(Atlas_table, sep=',', header=None)
if atlas_name!='Tracts':
u=np.unique(A._data)
for j,i in enumerate(Table[0]):
if i in u:
Atlas['regions'][i]=Table[1][j]
Atlas['mask'][i]=np.where(A._data==i)
return Atlas
else:
Atlas['mask']=A
Atlas['regions']=Table[1].tolist()
return Atlas
if __name__=="__main__":
if args.th<=0 or args.th>=1:
raise ValueError('Threshold should be 0 < threshold < 1, not {}'.format(args.th))
results=OrderedDict()
Atlas=get_atlas(args.a, args.atlas)
P=nipy.load_image(args.p)
if args.mask is not None:
M=nipy.load_image(args.mask)
P._data[M._data==0]=1
results['Brain Region']=[]
results['Region size (#voxels)']=[]
results['min p-value']=[]
results['# negative voxels']=[]
results['# positive voxels']=[]
mask=np.where(P._data>args.th)
P._data[mask]=1
P._data[P._data==0]=1 #TODO change:check with mask, if zero outside, then it is significant
T_neg=nipy.load_image(args.t)
T_pos=nipy.load_image(args.t)
T_neg._data[mask]=0
T_pos._data[mask]=0
T_pos[T_pos._data<0]=0
T_neg[T_neg._data>0]=0
if args.a!='Tracts':
for k in Atlas['mask']:
results['Brain Region'].append(Atlas['regions'][k])
results['Region size (#voxels)'].append( len(Atlas['mask'][k][0]) )
results['min p-value'].append(np.min( P._data[Atlas['mask'][k]] ))
results['# negative voxels'].append(len( np.where(T_neg._data[Atlas['mask'][k]]!=0)[0] ))
results['# positive voxels'].append(len( np.where(T_pos._data[Atlas['mask'][k]]!=0)[0] ))
else:
for j,i in enumerate(Atlas['regions']):
results['Brain Region'].append(i)
tract=Atlas['mask'][:,:,:,j+1] #Tract atlas starts from 0 dim with no info
#print i, tract.shape, args.tract_th
tract_mask=np.where(tract._data>args.tract_th)
#print tract_mask[0]
results['Region size (#voxels)'].append( len(tract_mask[0]) )
results['min p-value'].append(np.min(P._data[tract_mask]))
results['# negative voxels'].append(len(np.where(T_neg._data[tract_mask] != 0)[0]))
results['# positive voxels'].append(len(np.where(T_pos._data[tract_mask] != 0)[0]))
df=pd.DataFrame.from_dict(results)
df.to_csv(os.path.join(args.o,args.n), sep=',', index=False)
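# --- Illustrative addition (not part of the original script) ---------------
# A hedged example invocation assembled from the argparse options above; all
# file names are made up.
#
#   python summary.py -p pmap.nii.gz -t tmap.nii.gz -a Hammer \
#       -o ./results -n summary_table.csv -th 0.05 -mask brain_mask.nii.gz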
| gpl-2.0 |
doordash/auto_ml | tests/regressors.py | 1 | 12027 | import datetime
import os
import random
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
from quantile_ml import Predictor
from quantile_ml.utils_models import load_ml_model
import dill
from nose.tools import assert_equal, assert_not_equal, with_setup
import numpy as np
from sklearn.model_selection import train_test_split
import utils_testing as utils
def optimize_final_model_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, optimize_final_model=True, model_names=model_name)
test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
print('test_score')
print(test_score)
# the random seed gets a score of -3.21 on python 3.5
# There's a ton of noise here, due to small sample sizes
lower_bound = -3.4
if model_name == 'DeepLearningRegressor':
lower_bound = -20
if model_name == 'LGBMRegressor':
lower_bound = -5.5
if model_name == 'GradientBoostingRegressor':
lower_bound = -3.5
assert lower_bound < test_score < -2.8
def categorical_ensembling_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train_categorical_ensemble(df_boston_train, perform_feature_selection=True, model_names=model_name, categorical_column='CHAS')
test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)
print('test_score')
print(test_score)
# Bumping this up since without these features our score drops
lower_bound = -4.0
if model_name == 'DeepLearningRegressor':
lower_bound = -19
if model_name == 'LGBMRegressor':
lower_bound = -4.95
assert lower_bound < test_score < -2.8
def getting_single_predictions_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, perform_feature_scaling=False, model_names=model_name)
file_name = ml_predictor.save(str(random.random()))
# if model_name == 'DeepLearningRegressor':
# from quantile_ml.utils_models import load_keras_model
# saved_ml_pipeline = load_keras_model(file_name)
# else:
# with open(file_name, 'rb') as read_file:
# saved_ml_pipeline = dill.load(read_file)
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_boston_test_dictionaries = df_boston_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
print('predictions[0]')
print(predictions[0])
print('type(predictions)')
print(type(predictions))
first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -3.2
if model_name == 'DeepLearningRegressor':
lower_bound = -8.8
if model_name == 'LGBMRegressor':
lower_bound = -4.95
if model_name == 'XGBRegressor':
lower_bound = -3.4
assert lower_bound < first_score < -2.8
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_boston_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.1 < duration.total_seconds() / 1.0 < 15
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -2.8
def feature_learning_getting_single_predictions_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
# NOTE: this is bad practice to pass in our same training set as our fl_data set, but we don't have enough data to do it any other way
df_boston_train, fl_data = train_test_split(df_boston_train, test_size=0.2)
ml_predictor.train(df_boston_train, model_names=model_name, feature_learning=True, fl_data=fl_data)
file_name = ml_predictor.save(str(random.random()))
# from quantile_ml.utils_models import load_keras_model
# saved_ml_pipeline = load_keras_model(file_name)
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_boston_test_dictionaries = df_boston_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -3.2
if model_name == 'DeepLearningRegressor':
lower_bound = -23
if model_name == 'LGBMRegressor':
lower_bound = -4.95
if model_name == 'XGBRegressor':
lower_bound = -3.3
assert lower_bound < first_score < -2.8
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_boston_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() / 1.0 < 15
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -2.8
def feature_learning_categorical_ensembling_getting_single_predictions_regression(model_name=None):
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
# NOTE: this is bad practice to pass in our same training set as our fl_data set, but we don't have enough data to do it any other way
df_boston_train, fl_data = train_test_split(df_boston_train, test_size=0.2)
ml_predictor.train_categorical_ensemble(df_boston_train, model_names=model_name, feature_learning=False, fl_data=fl_data, categorical_column='CHAS')
file_name = ml_predictor.save(str(random.random()))
from quantile_ml.utils_models import load_ml_model
saved_ml_pipeline = load_ml_model(file_name)
# with open(file_name, 'rb') as read_file:
# saved_ml_pipeline = dill.load(read_file)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
except:
pass
df_boston_test_dictionaries = df_boston_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -3.2
if model_name == 'DeepLearningRegressor':
lower_bound = -21.5
if model_name == 'LGBMRegressor':
lower_bound = -5.1
if model_name == 'XGBRegressor':
lower_bound = -3.6
if model_name == 'GradientBoostingRegressor':
lower_bound = -3.6
assert lower_bound < first_score < -2.8
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_boston_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() / 1.0 < 15
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_boston_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -2.8
| mit |
bsipocz/scikit-image | doc/examples/plot_view_as_blocks.py | 17 | 2002 | """
============================
Block views on images/arrays
============================
This example illustrates the use of `view_as_blocks` from
`skimage.util.shape`. Block views can be incredibly useful when one
wants to perform local operations on non-overlapping image patches.
We use `astronaut` from `skimage.data` and virtually 'slice' it into square
blocks. Then, on each block, we either pool the mean, the max or the
median value of that block. The results are displayed side by side, along
with the original `astronaut` image rescaled by a spline interpolation of
order 3.
"""
import numpy as np
from scipy import ndimage as ndi
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from skimage import data
from skimage import color
from skimage.util.shape import view_as_blocks
# -- get `astronaut` from skimage.data in grayscale
l = color.rgb2gray(data.astronaut())
# -- size of blocks
block_shape = (4, 4)
# -- see `astronaut` as a matrix of blocks (of shape
# `block_shape`)
view = view_as_blocks(l, block_shape)
# -- collapse the last two dimensions in one
flatten_view = view.reshape(view.shape[0], view.shape[1], -1)
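# -- `view` is 4-D: (l.shape[0] // 4, l.shape[1] // 4, 4, 4). Collapsing the
# last two axes leaves 16 pixels per block, so the pooling below reduces
# along axis=2.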
# -- resampling `astronaut` by taking either the `mean`,
# the `max` or the `median` value of each block.
mean_view = np.mean(flatten_view, axis=2)
max_view = np.max(flatten_view, axis=2)
median_view = np.median(flatten_view, axis=2)
# -- display resampled images
fig, axes = plt.subplots(2, 2, figsize=(8, 8))
ax0, ax1, ax2, ax3 = axes.ravel()
ax0.set_title("Original rescaled with\n spline interpolation (order=3)")
l_resized = ndi.zoom(l, 2, order=3)
ax0.imshow(l_resized, cmap=cm.Greys_r)
ax1.set_title("Block view with\n local mean pooling")
ax1.imshow(mean_view, cmap=cm.Greys_r)
ax2.set_title("Block view with\n local max pooling")
ax2.imshow(max_view, cmap=cm.Greys_r)
ax3.set_title("Block view with\n local median pooling")
ax3.imshow(median_view, cmap=cm.Greys_r)
fig.subplots_adjust(hspace=0.4, wspace=0.4)
plt.show()
| bsd-3-clause |
jmmease/pandas | pandas/tests/frame/test_period.py | 18 | 5005 | import numpy as np
from numpy.random import randn
from datetime import timedelta
import pandas as pd
import pandas.util.testing as tm
from pandas import (PeriodIndex, period_range, DataFrame, date_range,
Index, to_datetime, DatetimeIndex)
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestPeriodIndex(object):
def setup_method(self, method):
pass
def test_as_frame_columns(self):
rng = period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
ts = df[rng[0]]
tm.assert_series_equal(ts, df.iloc[:, 0])
# GH # 1211
repr(df)
ts = df['1/1/2000']
tm.assert_series_equal(ts, df.iloc[:, 0])
def test_frame_setitem(self):
rng = period_range('1/1/2000', periods=5, name='index')
df = DataFrame(randn(5, 3), index=rng)
df['Index'] = rng
rs = Index(df['Index'])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == 'Index'
assert rng.name == 'index'
rs = df.reset_index().set_index('index')
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_frame_to_time_stamp(self):
K = 5
index = PeriodIndex(freq='A', start='1/1/2001', end='12/1/2009')
df = DataFrame(randn(len(index), K), index=index)
df['mix'] = 'a'
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end')
tm.assert_index_equal(result.index, exp_index)
tm.assert_numpy_array_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start')
tm.assert_index_equal(result.index, exp_index)
def _get_with_delta(delta, freq='A-DEC'):
return date_range(to_datetime('1/1/2001') + delta,
to_datetime('12/31/2009') + delta, freq=freq)
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end')
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.index, exp_index)
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end')
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.index, exp_index)
result = df.to_timestamp('S', 'end')
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.index, exp_index)
# columns
df = df.T
exp_index = date_range('1/1/2001', end='12/31/2009', freq='A-DEC')
result = df.to_timestamp('D', 'end', axis=1)
tm.assert_index_equal(result.columns, exp_index)
tm.assert_numpy_array_equal(result.values, df.values)
exp_index = date_range('1/1/2001', end='1/1/2009', freq='AS-JAN')
result = df.to_timestamp('D', 'start', axis=1)
tm.assert_index_equal(result.columns, exp_index)
delta = timedelta(hours=23)
result = df.to_timestamp('H', 'end', axis=1)
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.columns, exp_index)
delta = timedelta(hours=23, minutes=59)
result = df.to_timestamp('T', 'end', axis=1)
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.columns, exp_index)
result = df.to_timestamp('S', 'end', axis=1)
delta = timedelta(hours=23, minutes=59, seconds=59)
exp_index = _get_with_delta(delta)
tm.assert_index_equal(result.columns, exp_index)
# invalid axis
tm.assert_raises_regex(
ValueError, 'axis', df.to_timestamp, axis=2)
result1 = df.to_timestamp('5t', axis=1)
result2 = df.to_timestamp('t', axis=1)
expected = pd.date_range('2001-01-01', '2009-01-01', freq='AS')
assert isinstance(result1.columns, DatetimeIndex)
assert isinstance(result2.columns, DatetimeIndex)
tm.assert_numpy_array_equal(result1.columns.asi8, expected.asi8)
tm.assert_numpy_array_equal(result2.columns.asi8, expected.asi8)
# PeriodIndex.to_timestamp always use 'infer'
assert result1.columns.freqstr == 'AS-JAN'
assert result2.columns.freqstr == 'AS-JAN'
def test_frame_index_to_string(self):
index = PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M')
frame = DataFrame(np.random.randn(3, 4), index=index)
# it works!
frame.to_string()
def test_align_frame(self):
rng = period_range('1/1/2000', '1/1/2010', freq='A')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/IPython/config/loader.py | 7 | 28718 | """A simple configuration system.
Inheritance diagram:
.. inheritance-diagram:: IPython.config.loader
:parts: 3
Authors
-------
* Brian Granger
* Fernando Perez
* Min RK
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import argparse
import copy
import logging
import os
import re
import sys
import json
from IPython.utils.path import filefind, get_ipython_dir
from IPython.utils import py3compat
from IPython.utils.encoding import DEFAULT_ENCODING
from IPython.utils.py3compat import unicode_type, iteritems
from IPython.utils.traitlets import HasTraits, List, Any, TraitError
#-----------------------------------------------------------------------------
# Exceptions
#-----------------------------------------------------------------------------
class ConfigError(Exception):
pass
class ConfigLoaderError(ConfigError):
pass
class ConfigFileNotFound(ConfigError):
pass
class ArgumentError(ConfigLoaderError):
pass
#-----------------------------------------------------------------------------
# Argparse fix
#-----------------------------------------------------------------------------
# Unfortunately argparse by default prints help messages to stderr instead of
# stdout. This makes it annoying to capture long help screens at the command
# line, since one must know how to pipe stderr, which many users don't know how
# to do. So we override the print_help method with one that defaults to
# stdout and use our class instead.
class ArgumentParser(argparse.ArgumentParser):
"""Simple argparse subclass that prints help to stdout by default."""
def print_help(self, file=None):
if file is None:
file = sys.stdout
return super(ArgumentParser, self).print_help(file)
print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
#-----------------------------------------------------------------------------
# Config class for holding config information
#-----------------------------------------------------------------------------
class LazyConfigValue(HasTraits):
"""Proxy object for exposing methods on configurable containers
Exposes:
- append, extend, insert on lists
- update on dicts
- update, add on sets
"""
_value = None
# list methods
_extend = List()
_prepend = List()
def append(self, obj):
self._extend.append(obj)
def extend(self, other):
self._extend.extend(other)
def prepend(self, other):
"""like list.extend, but for the front"""
self._prepend[:0] = other
_inserts = List()
def insert(self, index, other):
if not isinstance(index, int):
raise TypeError("An integer is required")
self._inserts.append((index, other))
# dict methods
# update is used for both dict and set
_update = Any()
def update(self, other):
if self._update is None:
if isinstance(other, dict):
self._update = {}
else:
self._update = set()
self._update.update(other)
# set methods
def add(self, obj):
self.update({obj})
def get_value(self, initial):
"""construct the value from the initial one
after applying any insert / extend / update changes
"""
if self._value is not None:
return self._value
value = copy.deepcopy(initial)
if isinstance(value, list):
for idx, obj in self._inserts:
value.insert(idx, obj)
value[:0] = self._prepend
value.extend(self._extend)
elif isinstance(value, dict):
if self._update:
value.update(self._update)
elif isinstance(value, set):
if self._update:
value.update(self._update)
self._value = value
return value
def to_dict(self):
"""return JSONable dict form of my data
Currently: update (a dict or set), extend and prepend (lists), and inserts (a list of (index, value) tuples).
"""
d = {}
if self._update:
d['update'] = self._update
if self._extend:
d['extend'] = self._extend
if self._prepend:
d['prepend'] = self._prepend
elif self._inserts:
d['inserts'] = self._inserts
return d
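# Usage sketch (illustrative, not part of the original module): container
# mutations are queued until the real value is known, then replayed:
#
#     lzy = LazyConfigValue()
#     lzy.prepend([1])
#     lzy.append(3)
#     lzy.get_value([2])   # -> [1, 2, 3]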
def _is_section_key(key):
"""Is a Config key a section name (does it start with a capital)?"""
if key and key[0].upper()==key[0] and not key.startswith('_'):
return True
else:
return False
class Config(dict):
"""An attribute based dict that can do smart merges."""
def __init__(self, *args, **kwds):
dict.__init__(self, *args, **kwds)
self._ensure_subconfig()
def _ensure_subconfig(self):
"""ensure that sub-dicts that should be Config objects are
casts dicts that are under section keys to Config objects,
which is necessary for constructing Config objects from dict literals.
"""
for key in self:
obj = self[key]
if _is_section_key(key) \
and isinstance(obj, dict) \
and not isinstance(obj, Config):
setattr(self, key, Config(obj))
def _merge(self, other):
"""deprecated alias, use Config.merge()"""
self.merge(other)
def merge(self, other):
"""merge another config object into this one"""
to_update = {}
for k, v in iteritems(other):
if k not in self:
to_update[k] = copy.deepcopy(v)
else: # I have this key
if isinstance(v, Config) and isinstance(self[k], Config):
# Recursively merge common sub Configs
self[k].merge(v)
else:
# Plain updates for non-Configs
to_update[k] = copy.deepcopy(v)
self.update(to_update)
def __contains__(self, key):
# allow nested contains of the form `"Section.key" in config`
if '.' in key:
first, remainder = key.split('.', 1)
if first not in self:
return False
return remainder in self[first]
return super(Config, self).__contains__(key)
# .has_key is deprecated for dictionaries.
has_key = __contains__
def _has_section(self, key):
return _is_section_key(key) and key in self
def copy(self):
return type(self)(dict.copy(self))
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
import copy
return type(self)(copy.deepcopy(list(self.items())))
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
if _is_section_key(key):
c = Config()
dict.__setitem__(self, key, c)
return c
elif not key.startswith('_'):
# undefined, create lazy value, used for container methods
v = LazyConfigValue()
dict.__setitem__(self, key, v)
return v
else:
raise KeyError
def __setitem__(self, key, value):
if _is_section_key(key):
if not isinstance(value, Config):
raise ValueError('values whose keys begin with an uppercase '
'char must be Config instances: %r, %r' % (key, value))
dict.__setitem__(self, key, value)
def __getattr__(self, key):
if key.startswith('__'):
return dict.__getattr__(self, key)
try:
return self.__getitem__(key)
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, key, value):
if key.startswith('__'):
return dict.__setattr__(self, key, value)
try:
self.__setitem__(key, value)
except KeyError as e:
raise AttributeError(e)
def __delattr__(self, key):
if key.startswith('__'):
return dict.__delattr__(self, key)
try:
dict.__delitem__(self, key)
except KeyError as e:
raise AttributeError(e)
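# Usage sketch (illustrative): Config gives attribute access to sections and
# merges them recursively:
#
#     c1 = Config()
#     c1.InteractiveShell.autocall = False
#     c2 = Config({'InteractiveShell': {'colors': 'NoColor'}})
#     c1.merge(c2)   # c1.InteractiveShell now holds both settings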
#-----------------------------------------------------------------------------
# Config loading classes
#-----------------------------------------------------------------------------
class ConfigLoader(object):
"""A object for loading configurations from just about anywhere.
The resulting configuration is packaged as a :class:`Config`.
Notes
-----
A :class:`ConfigLoader` does one thing: load a config from a source
(file, command line arguments) and return the data as a :class:`Config` object.
There are lots of things that :class:`ConfigLoader` does not do. It does
not implement complex logic for finding config files. It does not handle
default values or merge multiple configs. These things need to be
handled elsewhere.
"""
def _log_default(self):
from IPython.config.application import Application
if Application.initialized():
return Application.instance().log
else:
return logging.getLogger()
def __init__(self, log=None):
"""A base class for config loaders.
log : instance of :class:`logging.Logger` to use.
By default the logger of :meth:`IPython.config.application.Application.instance()`
will be used.
Examples
--------
>>> cl = ConfigLoader()
>>> config = cl.load_config()
>>> config
{}
"""
self.clear()
if log is None:
self.log = self._log_default()
self.log.debug('Using default logger')
else:
self.log = log
def clear(self):
self.config = Config()
def load_config(self):
"""Load a config from somewhere, return a :class:`Config` instance.
Usually, this will cause self.config to be set and then returned.
However, in most cases, :meth:`ConfigLoader.clear` should be called
to erase any previous state.
"""
self.clear()
return self.config
class FileConfigLoader(ConfigLoader):
"""A base class for file based configurations.
As we add more file based config loaders, the common logic should go
here.
"""
def __init__(self, filename, path=None, **kw):
"""Build a config loader for a filename and path.
Parameters
----------
filename : str
The file name of the config file.
path : str, list, tuple
The path to search for the config file on, or a sequence of
paths to try in order.
"""
super(FileConfigLoader, self).__init__(**kw)
self.filename = filename
self.path = path
self.full_filename = ''
def _find_file(self):
"""Try to find the file by searching the paths."""
self.full_filename = filefind(self.filename, self.path)
class JSONFileConfigLoader(FileConfigLoader):
"""A Json file loader for config"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
dct = self._read_file_as_dict()
self.config = self._convert_to_config(dct)
return self.config
def _read_file_as_dict(self):
with open(self.full_filename) as f:
return json.load(f)
def _convert_to_config(self, dictionary):
if 'version' in dictionary:
version = dictionary.pop('version')
else:
version = 1
self.log.warn("Unrecognized JSON config file version, assuming version {}".format(version))
if version == 1:
return Config(dictionary)
else:
raise ValueError('Unknown version of JSON config file: {version}'.format(version=version))
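# A JSON file accepted by this loader is just nested config sections plus an
# optional "version" key, e.g. (illustrative sketch; section/trait names made up):
#
#     {"version": 1,
#      "SomeConfigurable": {"some_trait": 42}}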
class PyFileConfigLoader(FileConfigLoader):
"""A config loader for pure python files.
This is responsible for locating a Python config file by filename and
path, then executing it to construct a Config object.
"""
def load_config(self):
"""Load the config from a file and return it as a Config object."""
self.clear()
try:
self._find_file()
except IOError as e:
raise ConfigFileNotFound(str(e))
self._read_file_as_dict()
return self.config
def _read_file_as_dict(self):
"""Load the config file into self.config, with recursive loading."""
# This closure is made available in the namespace that is used
# to exec the config file. It allows users to call
# load_subconfig('myconfig.py') to load config files recursively.
# It needs to be a closure because it has references to self.path
# and self.config. The sub-config is loaded with the same path
# as the parent, but it uses an empty config which is then merged
# with the parents.
# If a profile is specified, the config file will be loaded
# from that profile
def load_subconfig(fname, profile=None):
# import here to prevent circular imports
from IPython.core.profiledir import ProfileDir, ProfileDirError
if profile is not None:
try:
profile_dir = ProfileDir.find_profile_dir_by_name(
get_ipython_dir(),
profile,
)
except ProfileDirError:
return
path = profile_dir.location
else:
path = self.path
loader = PyFileConfigLoader(fname, path)
try:
sub_config = loader.load_config()
except ConfigFileNotFound:
# Pass silently if the sub config is not there. This happens
# when a user is using a profile, but not the default config.
pass
else:
self.config.merge(sub_config)
# Again, this needs to be a closure and should be used in config
# files to get the config being loaded.
def get_config():
return self.config
namespace = dict(
load_subconfig=load_subconfig,
get_config=get_config,
__file__=self.full_filename,
)
fs_encoding = sys.getfilesystemencoding() or 'ascii'
conf_filename = self.full_filename.encode(fs_encoding)
py3compat.execfile(conf_filename, namespace)
class CommandLineConfigLoader(ConfigLoader):
"""A config loader for command line arguments.
As we add more command line based loaders, the common logic should go
here.
"""
def _exec_config_str(self, lhs, rhs):
"""execute self.config.<lhs> = <rhs>
* expands ~ with expanduser
* tries to assign with raw eval, otherwise assigns with just the string,
allowing `--C.a=foobar` and `--C.a="foobar"` to be equivalent. *Not*
equivalent are `--C.a=4` and `--C.a='4'`.
"""
rhs = os.path.expanduser(rhs)
try:
# Try to see if regular Python syntax will work. This
# won't handle strings as the quote marks are removed
# by the system shell.
value = eval(rhs)
except (NameError, SyntaxError):
# This case happens if the rhs is a string.
value = rhs
exec(u'self.config.%s = value' % lhs)
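# e.g. (illustrative): _exec_config_str("C.a", "4")      -> config.C.a == 4 (int)
#                      _exec_config_str("C.a", "foobar") -> config.C.a == 'foobar' (str)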
def _load_flag(self, cfg):
"""update self.config from a flag, which can be a dict or Config"""
if isinstance(cfg, (dict, Config)):
# don't clobber whole config sections, update
# each section from config:
for sec,c in iteritems(cfg):
self.config[sec].update(c)
else:
raise TypeError("Invalid flag: %r" % cfg)
# raw --identifier=value pattern
# but *also* accept '-' as wordsep, for aliases
# accepts: --foo=a
# --Class.trait=value
# --alias-name=value
# rejects: -foo=value
# --foo
# --Class.trait
kv_pattern = re.compile(r'\-\-[A-Za-z][\w\-]*(\.[\w\-]+)*\=.*')
# just flags, no assignments, with two *or one* leading '-'
# accepts: --foo
# -foo-bar-again
# rejects: --anything=anything
# --two.word
flag_pattern = re.compile(r'\-\-?\w+[\-\w]*$')
class KeyValueConfigLoader(CommandLineConfigLoader):
"""A config loader that loads key value pairs from the command line.
This allows command line options to be given in the following form::
ipython --profile="foo" --InteractiveShell.autocall=False
"""
def __init__(self, argv=None, aliases=None, flags=None, **kw):
"""Create a key value pair config loader.
Parameters
----------
argv : list
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then sys.argv[1:] will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
A dict of flags, keyed by str name. Values can be Config objects,
dicts, or "key=value" strings. If Config or dict, when the flag
is triggered, the flag is loaded as `self.config.update(m)`.
Returns
-------
config : Config
The resulting Config object.
Examples
--------
>>> from IPython.config.loader import KeyValueConfigLoader
>>> cl = KeyValueConfigLoader()
>>> d = cl.load_config(["--A.name='brian'","--B.number=0"])
>>> sorted(d.items())
[('A', {'name': 'brian'}), ('B', {'number': 0})]
"""
super(KeyValueConfigLoader, self).__init__(**kw)
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
def clear(self):
super(KeyValueConfigLoader, self).clear()
self.extra_args = []
def _decode_argv(self, argv, enc=None):
"""decode argv if bytes, using stin.encoding, falling back on default enc"""
uargv = []
if enc is None:
enc = DEFAULT_ENCODING
for arg in argv:
if not isinstance(arg, unicode_type):
# only decode if not already decoded
arg = arg.decode(enc)
uargv.append(arg)
return uargv
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse the configuration and generate the Config object.
After loading, any arguments that are not key-value or
flags will be stored in self.extra_args - a list of
unparsed command-line arguments. This is used for
arguments such as input files or subcommands.
Parameters
----------
argv : list, optional
A list that has the form of sys.argv[1:] which has unicode
elements of the form u"key=value". If this is None (default),
then self.argv will be used.
aliases : dict
A dict of aliases for configurable traits.
Keys are the short aliases, Values are the resolved trait.
Of the form: `{'alias' : 'Configurable.trait'}`
flags : dict
A dict of flags, keyed by str name. Values can be Config objects
or dicts. When the flag is triggered, the config is loaded as
`self.config.update(cfg)`.
"""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
# ensure argv is a list of unicode strings:
uargv = self._decode_argv(argv)
for idx,raw in enumerate(uargv):
# strip leading '-'
item = raw.lstrip('-')
if raw == '--':
# don't parse arguments after '--'
# this is useful for relaying arguments to scripts, e.g.
# ipython -i foo.py --matplotlib=qt -- args after '--' go-to-foo.py
self.extra_args.extend(uargv[idx+1:])
break
if kv_pattern.match(raw):
lhs,rhs = item.split('=',1)
# Substitute longnames for aliases.
if lhs in aliases:
lhs = aliases[lhs]
if '.' not in lhs:
# probably a mistyped alias, but not technically illegal
self.log.warn("Unrecognized alias: '%s', it will probably have no effect.", raw)
try:
self._exec_config_str(lhs, rhs)
except Exception:
raise ArgumentError("Invalid argument: '%s'" % raw)
elif flag_pattern.match(raw):
if item in flags:
cfg,help = flags[item]
self._load_flag(cfg)
else:
raise ArgumentError("Unrecognized flag: '%s'"%raw)
elif raw.startswith('-'):
kv = '--'+item
if kv_pattern.match(kv):
raise ArgumentError("Invalid argument: '%s', did you mean '%s'?"%(raw, kv))
else:
raise ArgumentError("Invalid argument: '%s'"%raw)
else:
# keep all args that aren't valid in a list,
# in case our parent knows what to do with them.
self.extra_args.append(item)
return self.config
class ArgParseConfigLoader(CommandLineConfigLoader):
"""A loader that uses the argparse module to load from the command line."""
def __init__(self, argv=None, aliases=None, flags=None, log=None, *parser_args, **parser_kw):
"""Create a config loader for use with argparse.
Parameters
----------
argv : optional, list
If given, used to read command-line arguments from, otherwise
sys.argv[1:] is used.
parser_args : tuple
A tuple of positional arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
parser_kw : dict
A dict of keyword arguments that will be passed to the
constructor of :class:`argparse.ArgumentParser`.
Returns
-------
config : Config
The resulting Config object.
"""
super(CommandLineConfigLoader, self).__init__(log=log)
self.clear()
if argv is None:
argv = sys.argv[1:]
self.argv = argv
self.aliases = aliases or {}
self.flags = flags or {}
self.parser_args = parser_args
self.version = parser_kw.pop("version", None)
kwargs = dict(argument_default=argparse.SUPPRESS)
kwargs.update(parser_kw)
self.parser_kw = kwargs
def load_config(self, argv=None, aliases=None, flags=None):
"""Parse command line arguments and return as a Config object.
Parameters
----------
argv : optional, list
If given, a list with the structure of sys.argv[1:] to parse
arguments from. If not given, the instance's self.argv attribute
(given at construction time) is used."""
self.clear()
if argv is None:
argv = self.argv
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
self._create_parser(aliases, flags)
self._parse_args(argv)
self._convert_to_config()
return self.config
def get_extra_args(self):
if hasattr(self, 'extra_args'):
return self.extra_args
else:
return []
def _create_parser(self, aliases=None, flags=None):
self.parser = ArgumentParser(*self.parser_args, **self.parser_kw)
self._add_arguments(aliases, flags)
def _add_arguments(self, aliases=None, flags=None):
raise NotImplementedError("subclasses must implement _add_arguments")
def _parse_args(self, args):
"""self.parser->self.parsed_data"""
# decode sys.argv to support unicode command-line options
enc = DEFAULT_ENCODING
uargs = [py3compat.cast_unicode(a, enc) for a in args]
self.parsed_data, self.extra_args = self.parser.parse_known_args(uargs)
def _convert_to_config(self):
"""self.parsed_data->self.config"""
for k, v in iteritems(vars(self.parsed_data)):
exec("self.config.%s = v"%k, locals(), globals())
class KVArgParseConfigLoader(ArgParseConfigLoader):
"""A config loader that loads aliases and flags with argparse,
but will use KVLoader for the rest. This allows better parsing
of common args, such as `ipython -c 'print 5'`, but still gets
arbitrary config with `ipython --InteractiveShell.use_readline=False`"""
def _add_arguments(self, aliases=None, flags=None):
self.alias_flags = {}
# print aliases, flags
if aliases is None:
aliases = self.aliases
if flags is None:
flags = self.flags
paa = self.parser.add_argument
for key,value in iteritems(aliases):
if key in flags:
# flags
nargs = '?'
else:
nargs = None
if len(key) == 1:
paa('-'+key, '--'+key, type=unicode_type, dest=value, nargs=nargs)
else:
paa('--'+key, type=unicode_type, dest=value, nargs=nargs)
for key, (value, help) in iteritems(flags):
if key in self.aliases:
# this flag shares its name with an alias; keep its config so _convert_to_config can apply it
self.alias_flags[self.aliases[key]] = value
continue
if len(key) == 1:
paa('-'+key, '--'+key, action='append_const', dest='_flags', const=value)
else:
paa('--'+key, action='append_const', dest='_flags', const=value)
def _convert_to_config(self):
"""self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
# remove subconfigs list from namespace before transforming the Namespace
if '_flags' in self.parsed_data:
subcs = self.parsed_data._flags
del self.parsed_data._flags
else:
subcs = []
for k, v in iteritems(vars(self.parsed_data)):
if v is None:
# it was a flag that shares the name of an alias
subcs.append(self.alias_flags[k])
else:
# eval the KV assignment
self._exec_config_str(k, v)
for subc in subcs:
self._load_flag(subc)
if self.extra_args:
sub_parser = KeyValueConfigLoader(log=self.log)
sub_parser.load_config(self.extra_args)
self.config.merge(sub_parser.config)
self.extra_args = sub_parser.extra_args
def load_pyconfig_files(config_files, path):
"""Load multiple Python config files, merging each of them in turn.
Parameters
==========
config_files : list of str
List of config files names to load and merge into the config.
path : unicode
The full path to the location of the config files.
"""
config = Config()
for cf in config_files:
loader = PyFileConfigLoader(cf, path=path)
try:
next_config = loader.load_config()
except ConfigFileNotFound:
pass
except:
raise
else:
config.merge(next_config)
return config
| gpl-3.0 |
LucidBlue/mykeepon-storyteller | src/realtime_audio_capture.py | 1 | 3120 | #! /usr/bin/env python
from __future__ import division
import pyaudio
import wave
import sys
import scipy
import numpy as np
import struct
CHUNK = 2**10
#from scikits.audiolab import flacread
from numpy.fft import rfft, irfft
from numpy import argmax, sqrt, mean, diff, log
import matplotlib
from scipy.signal import blackmanharris, fftconvolve
from time import time
from parabolic import parabolic
def freq_power_from_fft(sig, fs):
# Compute Fourier transform of windowed signal
windowed = sig * blackmanharris(CHUNK)
fftData = abs(rfft(windowed))
# powerData = 20*log10(fftData)
# Find the index of the peak and interpolate to get a more accurate peak
i = argmax(fftData) # Just use this for less-accurate, naive version
# make sure i is not an endpoint of the bin
if i != len(fftData)-1 and i != 0:
#print("data: " + str(parabolic(log(abs(fftData)), i)))
true_i,logmag = parabolic(log(abs(fftData)), i)
# Convert to equivalent frequency
freq= fs * true_i / len(windowed)
#print("frequency="+ str(freq) + " log of magnitude=" + str(logmag))
if logmag < 0:
logmag = 0
return freq,logmag
else:
freq = fs * i / len(windowed)
logmag = log(abs(fftData))[i]
if logmag < 0:
logmag = 0
#print("frequency="+ str(freq) + "log of magnitude not interp=" + str(logmag))
return freq,logmag
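# Hedged sketch (added for illustration, not part of the original script): feed the
# estimator one CHUNK of a synthetic sine at a known frequency and print the detected
# peak. The 440 Hz tone and 44100 Hz rate below are assumed values for the check only.
def _check_estimator_on_tone(freq_hz=440.0, fs=44100):
    t = np.arange(CHUNK) / float(fs)        # one analysis frame of sample times
    tone = np.sin(2 * np.pi * freq_hz * t)  # pure sinusoid at freq_hz
    est_freq, est_logmag = freq_power_from_fft(tone, fs)
    print("expected %.1f Hz, estimated %.1f Hz (log magnitude %.2f)"
          % (freq_hz, est_freq, est_logmag))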
def main():
#open wav file
wf = wave.open(sys.argv[1], 'rb')
(channels,sample_width,rate,frames,comptype,compname) = wf.getparams()
FPS = 25.0
trimby = 10
divby = 100
#print("channels: %d sample width: %d rate: %d frames: %d chunk: %d" %(channels, sample_width, rate, frames, chunk))
# instantiate PyAudio
p = pyaudio.PyAudio()
stream = p.open(format = p.get_format_from_width(sample_width),
channels = channels,
rate = rate,
output = True)
data = wf.readframes(CHUNK)
freq_sum = 0.0
freq_count = 0
freq_max = 0.0
freq_min = 999999999999
while len(data) == CHUNK*sample_width:
# unpack data
indata = np.array(wave.struct.unpack("%dh" % (len(data) // sample_width), data))  # integer sample count (true division is imported)
#remainder of calculations
frequency,logmag = freq_power_from_fft(indata, rate)
print("frequency: " + str(frequency) + " logMagnitude: " + str(logmag))
"""
if frequency < 1000 and frequency > 500:
print "frequency: %f Hz" % (frequency)
"""
if frequency < 1000 and frequency > 0:
#print "frequency: %f Hz" % (frequency)
freq_sum += frequency
freq_count += 1
if frequency < freq_min:
freq_min = frequency
if frequency > freq_max:
freq_max = frequency
#print("freq count: " + str(freq_count))
# write data out to the audio stream after first round of calculations
stream.write(data)
# read some more data
data = wf.readframes(CHUNK)
avg_freq = freq_sum/freq_count
print("Average frequency for this clip: %f" %(avg_freq))
print("Min frequency for this clip: %f" %(freq_min))
print("Max frequency for this clip: %f" %(freq_max))
if data:
stream.write(data)
wf.close()
stream.stop_stream()
stream.close()
p.terminate()
if __name__ == "__main__":
main()
| bsd-3-clause |
lail3344/sms-tools | lectures/09-Sound-description/plots-code/hpcp.py | 25 | 1194 | import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
spectralPeaks = ess.SpectralPeaks()
hpcp = ess.HPCP()
x = ess.MonoLoader(filename = '../../../sounds/cello-double.wav', sampleRate = fs)()
hpcps = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
spectralPeaks_freqs, spectralPeaks_mags = spectralPeaks(mX)
hpcp_vals = hpcp(spectralPeaks_freqs, spectralPeaks_mags)
hpcps.append(hpcp_vals)
hpcps = np.array(hpcps)
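# hpcps stacks one 12-bin pitch class profile per analysis frame, i.e. shape (numFrames, 12)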
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (cello-double.wav)')
plt.subplot(2,1,2)
numFrames = int(hpcps[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.pcolormesh(frmTime, np.arange(12), np.transpose(hpcps))
plt.ylabel('spectral bins')
plt.title('HPCP')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('hpcp.png')
plt.show()
| agpl-3.0 |
DTOcean/dtocean-core | dtocean_core/utils/database.py | 1 | 40368 | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2018 Mathew Topper
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, print_function
# Standard Library
import os
import re
import time
import shutil
import string
import logging
import datetime as dt
# External modules
import yaml
import argparse
import geoalchemy2
import numpy as np
import pandas as pd
from shapely import geos, wkb
from win32com.client import Dispatch
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
# DTOcean modules
from aneris.utilities.database import PostgreSQL
from polite.paths import ObjDirectory, UserDataDirectory
from polite.configuration import ReadYAML
# Local modules
from . import SmartFormatter
from .files import onerror
# Set up logging
module_logger = logging.getLogger(__name__)
def bathy_records_to_strata(bathy_records=None,
pre_bathy=None,
has_mannings=False):
"""Convert the bathymetry layers table returned by the database into
Strata structure raw input"""
loop_start_time = time.clock()
# Allow a predefined bathymetry table and grid dimensions to be passed
# instead of the DB records.
if bathy_records is None and pre_bathy is None:
errStr = "One of arguments bathy_records or pre_bathy must be given"
raise ValueError(errStr)
elif bathy_records is not None:
bathy_table, xi, yj = init_bathy_records(bathy_records,
has_mannings)
elif pre_bathy is not None:
bathy_table, xi, yj = pre_bathy
else:
return None
msg = "Building layers..."
module_logger.debug(msg)
layers = list(set(bathy_table["layer_order"]))
layers.sort()
bathy_layer_groups = bathy_table.groupby("layer_order")
layer_depths = []
layer_sediments = []
for layer in layers:
layer_table = bathy_layer_groups.get_group(layer)
layer_depth, layer_sediment = build_bathy_layer(layer_table, xi, yj)
layer_depths.append(layer_depth)
layer_sediments.append(layer_sediment)
depth_array = np.dstack(layer_depths)
sediment_array = np.dstack(layer_sediments)
layer_names = ["layer {}".format(x) for x in layers]
raw_strata = {"values": {"depth": depth_array,
"sediment": sediment_array},
"coords": [xi, yj, layer_names]}
loop_end_time = time.clock()
loop_time = loop_end_time - loop_start_time
msg = ("Time elapsed building {} layer(s) was "
"{} seconds").format(len(layers), loop_time)
module_logger.debug(msg)
return raw_strata
def bathy_records_to_mannings(bathy_records=None, pre_bathy=None):
"""Convert the bathymetry layers table returned by the database into
mannings layer structure raw input"""
loop_start_time = time.clock()
# Allow a predefined bathymetry table and grid dimensions to be passed
# instead of the DB records.
if bathy_records is None and pre_bathy is None:
errStr = "One of arguments bathy_records or pre_bathy must be given"
raise ValueError(errStr)
elif bathy_records is not None:
bathy_table, xi, yj = init_bathy_records(bathy_records,
True)
elif pre_bathy is not None:
bathy_table, xi, yj = pre_bathy
else:
return None
msg = "Building mannings..."
module_logger.debug(msg)
bathy_layer_groups = bathy_table.groupby("layer_order")
layer_one_table = bathy_layer_groups.get_group(1)
mannings_array = build_mannings_layer(layer_one_table, xi, yj)
mannings_raw = {"values": mannings_array,
"coords": [xi, yj]}
loop_end_time = time.clock()
loop_time = loop_end_time - loop_start_time
msg = ("Time elapsed building mannings number array was "
"{} seconds").format(loop_time)
module_logger.debug(msg)
return mannings_raw
def tidal_series_records_to_xset(tidal_records):
"""Convert the bathymetry layers table returned by the database into
tidal time series structure raw input"""
loop_start_time = time.clock()
msg = "Building DataFrame from {} records".format(len(tidal_records))
module_logger.debug(msg)
tidal_table = pd.DataFrame.from_records(tidal_records, columns=[
'utm_point',
'measure_date',
'measure_time',
'u',
'v',
'turbulence_intensity',
'ssh'])
if tidal_table.empty: return None
msg = "Converting PostGIS Point types to coordinates..."
module_logger.debug(msg)
tidal_table = point_to_xy(tidal_table)
msg = "Getting grid extents..."
module_logger.debug(msg)
xi, yj = get_grid_coords(tidal_table)
msg = "Joining dates and times..."
module_logger.debug(msg)
tidal_table["datetime"] = [dt.datetime.combine(d, t) for
d, t in zip(tidal_table["measure_date"],
tidal_table["measure_time"])]
tidal_table = tidal_table.drop("measure_date", 1)
tidal_table = tidal_table.drop("measure_time", 1)
msg = "Building time steps..."
module_logger.debug(msg)
steps = list(set(tidal_table["datetime"]))
steps.sort()
tidal_table_groups = tidal_table.groupby("datetime")
u_steps = []
v_steps = []
ssh_steps = []
ti_steps = []
for step in steps:
step_table = tidal_table_groups.get_group(step)
(u_step,
v_step,
ssh_step,
ti_step) = build_tidal_series_step(step_table, xi, yj)
u_steps.append(u_step)
v_steps.append(v_step)
ssh_steps.append(ssh_step)
ti_steps.append(ti_step)
u_array = np.dstack(u_steps)
v_array = np.dstack(v_steps)
ssh_array = np.dstack(ssh_steps)
ti_array = np.dstack(ti_steps)
raw = {"values": {"U": u_array,
'V': v_array,
"SSH": ssh_array,
"TI": ti_array},
"coords": [xi, yj, steps]}
loop_end_time = time.clock()
loop_time = loop_end_time - loop_start_time
msg = ("Time elapsed building {} step(s) was "
"{} seconds").format(len(steps), loop_time)
module_logger.debug(msg)
return raw
def init_bathy_records(bathy_records, has_mannings=False):
msg = "Building DataFrame from {} records".format(len(bathy_records))
module_logger.debug(msg)
bathy_cols = ["utm_point", "depth"]
if has_mannings: bathy_cols.append("mannings_no")
bathy_cols.extend(["layer_order",
"initial_depth",
"sediment_type"])
bathy_table = pd.DataFrame.from_records(bathy_records, columns=bathy_cols)
if bathy_table.empty: return None
msg = "Converting PostGIS Point types to coordinates..."
module_logger.debug(msg)
bathy_table = point_to_xy(bathy_table)
msg = "Getting grid extents..."
module_logger.debug(msg)
xi, yj = get_grid_coords(bathy_table)
return bathy_table, xi, yj
def point_to_xy(df,
point_column="utm_point",
decimals=2,
drop_point_column=True):
x = []
y = []
for point_hex in df[point_column]:
point = wkb.loads(point_hex, hex=True)
coords = list(point.coords)[0]
x.append(coords[0])
y.append(coords[1])
x = np.array(x)
x = x.round(decimals)
y = np.array(y)
y = y.round(decimals)
df["x"] = x
df["y"] = y
if drop_point_column: df = df.drop(point_column, 1)
return df
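# Hedged usage sketch (added for illustration; the coordinates are made up): build a
# one-row frame holding a WKB hex point and let point_to_xy expand it to x/y columns.
def _point_to_xy_example():
    from shapely.geometry import Point
    demo = pd.DataFrame({"utm_point": [Point(491000.0, 6502000.0).wkb_hex]})
    # returns the frame with new "x" and "y" columns and "utm_point" dropped
    return point_to_xy(demo)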
def get_grid_coords(df, xlabel="x", ylabel="y"):
xi = np.unique(df[xlabel])
xdist = xi[1:] - xi[:-1]
if len(np.unique(xdist)) != 1:
safe_dist = [str(x) for x in np.unique(xdist)]
dist_str = ", ".join(safe_dist)
errStr = ("Distances in x-direction are not equal. Unique lengths "
"are: {}").format(dist_str)
raise ValueError(errStr)
yj = np.unique(df[ylabel])
ydist = yj[1:] - yj[:-1]
if len(np.unique(ydist)) != 1:
safe_dist = [str(y) for y in np.unique(ydist)]
dist_str = ", ".join(safe_dist)
errStr = ("Distances in y-direction are not equal. Unique lengths "
"are: {}").format(dist_str)
raise ValueError(errStr)
return xi, yj
def build_bathy_layer(layer_table, xi, yj):
depth_array = np.zeros([len(xi), len(yj)]) * np.nan
sediment_array = np.full([len(xi), len(yj)], None, dtype="object")
for record in layer_table.itertuples():
xidxs = np.where(xi == record.x)[0]
assert len(xidxs) == 1
xidx = xidxs[0]
yidxs = np.where(yj == record.y)[0]
assert len(yidxs) == 1
yidx = yidxs[0]
depth = record.depth - record.initial_depth
sediment = record.sediment_type
depth_array[xidx, yidx] = depth
sediment_array[xidx, yidx] = sediment
return depth_array, sediment_array
def build_mannings_layer(layer_table, xi, yj):
mannings_array = np.zeros([len(xi), len(yj)]) * np.nan
for record in layer_table.itertuples():
xidxs = np.where(xi == record.x)[0]
assert len(xidxs) == 1
xidx = xidxs[0]
yidxs = np.where(yj == record.y)[0]
assert len(yidxs) == 1
yidx = yidxs[0]
mannings_array[xidx, yidx] = record.mannings_no
return mannings_array
def build_tidal_series_step(step_table, xi, yj):
u_array = np.zeros([len(xi), len(yj)]) * np.nan
v_array = np.zeros([len(xi), len(yj)]) * np.nan
ssh_array = np.zeros([len(xi), len(yj)]) * np.nan
ti_array = np.zeros([len(xi), len(yj)]) * np.nan
for record in step_table.itertuples():
xidxs = np.where(xi == record.x)[0]
assert len(xidxs) == 1
xidx = xidxs[0]
yidxs = np.where(yj == record.y)[0]
assert len(yidxs) == 1
yidx = yidxs[0]
u_array[xidx, yidx] = record.u
v_array[xidx, yidx] = record.v
ssh_array[xidx, yidx] = record.ssh
ti_array[xidx, yidx] = record.turbulence_intensity
return u_array, v_array, ssh_array, ti_array
def get_table_df(db, schema, table, columns):
df = pd.read_sql_table(table,
db._engine,
schema=schema,
columns=columns)
return df
def get_one_from_column(db, schema, table, column):
Table = db.safe_reflect_table(table, schema)
result = db.session.query(Table.columns[column]).one_or_none()
return result
def filter_one_from_column(db,
schema,
table,
result_column,
filter_column,
filter_value):
TableTwo = db.safe_reflect_table(table, schema)
query = db.session.query(TableTwo.columns[result_column])
result = query.filter(
TableTwo.columns[filter_column]==filter_value).one_or_none()
return result
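# Hypothetical usage (schema/table/column names below are placeholders only):
#   filter_one_from_column(db, "project", "site", "site_name", "id", 1)
# returns the single matching one-column row, or None if no row matches.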
def get_all_from_columns(db, schema, table, columns):
Table = db.safe_reflect_table(table, schema)
col_lists = []
for column in columns:
col_all = db.session.query(Table.columns[column]).all()
trim_col = [item[0] for item in col_all]
col_lists.append(trim_col)
return col_lists
def database_to_files(root_path,
table_list,
database,
schema=None,
table_name_list=None,
pid_list=None,
fid_list=None,
where_list=None,
auto_child=False,
print_function=None):
if print_function is None: print_function = print
def _dump_child(root_path,
table_dict,
engine,
schema,
table_name_list=None,
pid_list=None,
fid_list=None,
where_list=None,
auto_val=None):
child_path = os.path.join(root_path, table_dict["table"])
if auto_val is not None:
child_path += str(auto_val)
auto_child = True
else:
auto_child = False
# dump a directory
if os.path.exists(child_path):
shutil.rmtree(child_path, onerror=onerror)
os.makedirs(child_path)
# Recurse for the children
database_to_files(child_path,
table_dict["children"],
engine,
schema,
table_name_list,
pid_list,
fid_list,
where_list,
auto_child,
print_function)
return
def _autofit_columns(xlpath):
excel = Dispatch('Excel.Application')
wb = excel.Workbooks.Open(os.path.abspath(xlpath))
#Activate second sheet
excel.Worksheets(1).Activate()
#Autofit column in active sheet
excel.ActiveSheet.Columns.AutoFit()
#Or simply save changes in a current file
wb.Save()
wb.Close()
return
for table_dict in table_list:
table_df = None
new_name_list = None
new_pid_list = None
new_fid_list = None
new_where_list = None
full_dict = check_dict(table_dict)
# Set the schema
if schema is None:
var_schema = full_dict["schema"]
else:
var_schema = schema
msg_str = "Dumping table: {}.{}".format(var_schema,
table_dict["table"])
print_function(msg_str)
if not full_dict["dummy"]:
if table_name_list is None:
table_idx = 0
new_name_list = [full_dict["table"]]
else:
table_idx = len(table_name_list)
new_name_list = table_name_list + [full_dict["table"]]
if fid_list is None:
new_fid_list = [full_dict["fkey"]]
else:
new_fid_list = fid_list + [full_dict["fkey"]]
# Filter by the parent table if required
query_str = query_builder(new_name_list,
pid_list,
new_fid_list,
where_list,
var_schema)
msg_str = "Executing query: {}".format(query_str)
print_function(msg_str)
# Read the table first
table_df = pd.read_sql(query_str, database._engine)
if full_dict["stripf"] and auto_child:
msg_str = "Stripping column: {}".format(full_dict["fkey"])
print_function(msg_str)
table_df = table_df.drop(full_dict["fkey"], 1)
if full_dict["array"] is not None:
array_str = ", ".join(full_dict["array"])
msg_str = "Coverting array columns: {}".format(array_str)
print_function(msg_str)
table_df = convert_array(table_df, full_dict["array"])
if full_dict["bool"] is not None:
bool_str = ", ".join(full_dict["bool"])
msg_str = "Coverting boolean columns: {}".format(bool_str)
print_function(msg_str)
table_df = convert_bool(table_df, full_dict["bool"])
if full_dict["geo"] is not None:
geo_str = ", ".join(full_dict["geo"])
msg_str = "Coverting Geometry columns: {}".format(geo_str)
print_function(msg_str)
table_df = convert_geo(table_df, full_dict["geo"])
if full_dict["time"] is not None:
time_str = ", ".join(full_dict["time"])
msg_str = "Coverting Time columns: {}".format(time_str)
print_function(msg_str)
table_df = convert_time(table_df, full_dict["time"])
if len(table_df) < 1e6:
table_fname = full_dict["table"] + ".xlsx"
tab_path = os.path.join(root_path, table_fname)
msg_str = "Writing to: {}".format(tab_path)
print_function(msg_str)
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter(tab_path,
engine='xlsxwriter')
# Convert the dataframe to an XlsxWriter Excel object.
table_df.to_excel(writer, index=False)
# Close the Pandas Excel writer and output the Excel file.
writer.save()
# Fit the column widths (don't let failure be catastrophic)
try:
_autofit_columns(tab_path)
except:
print_function("*** Column adjust failed. Skipping. ***")
pass
else:
table_fname = full_dict["table"] + ".csv"
tab_path = os.path.join(root_path, table_fname)
msg_str = "Writing to: {}".format(tab_path)
print_function(msg_str)
table_df.to_csv(tab_path, index=False)
if full_dict["children"] is not None:
# Include pid in iteration
if full_dict["pkey"] is not None:
if pid_list is None:
new_pid_list = [full_dict["pkey"]]
else:
new_pid_list = pid_list + [full_dict["pkey"]]
# Check autokey
if full_dict["autokey"]:
pkids = table_df[full_dict["pkey"]]
del(table_df)
for pkid in pkids:
# Add a where
new_where = {"table#": table_idx,
"value": pkid}
if where_list is None:
new_where_list = [new_where]
else:
new_where_list = where_list + [new_where]
_dump_child(root_path,
table_dict,
database,
var_schema,
new_name_list,
new_pid_list,
new_fid_list,
new_where_list,
pkid)
else:
del(table_df)
if where_list is None:
new_where_list = None
else:
new_where_list = where_list[:]
_dump_child(root_path,
table_dict,
database,
var_schema,
new_name_list,
new_pid_list,
new_fid_list,
new_where_list)
return
def database_from_files(root_path,
table_list,
database,
schema=None,
add_fid=None,
truncate=True,
drop_missing=True,
print_function=None):
if print_function is None: print_function = print
def _list_dirs(path, tab_match):
dir_list = [item for item in os.listdir(path)
if os.path.isdir(os.path.join(path, item))]
match_str = r'^{}[0-9]+'.format(tab_match)
dir_list = [s for s in dir_list if re.search(match_str, s)]
dir_list.sort()
return dir_list
for table_dict in table_list:
full_dict = check_dict(table_dict)
# Set the schema
if schema is None:
var_schema = full_dict["schema"]
else:
var_schema = schema
if not full_dict["dummy"]:
xlname = "{}.xlsx".format(full_dict["table"])
xlpath = os.path.join(root_path, xlname)
csvname = "{}.csv".format(full_dict["table"])
csvpath = os.path.join(root_path, csvname)
# Try to read the table as xl or csv
if os.path.isfile(xlpath):
msg_str = "Reading file: {}".format(xlpath)
print_function(msg_str)
xl = pd.ExcelFile(xlpath)
df = xl.parse("Sheet1")
elif os.path.isfile(csvpath):
msg_str = "Reading file: {}".format(csvpath)
print_function(msg_str)
df = pd.read_csv(csvpath)
else:
errStr = ("Table {} could not be found in directory "
"{}").format(full_dict["table"], root_path)
raise IOError(errStr)
if add_fid is not None:
msg_str = ("Adding foreign key '{}' with value: "
"{}").format(full_dict["fkey"], add_fid)
print_function(msg_str)
df[full_dict["fkey"]] = add_fid
# Get the table name
dbname = "{}.{}".format(var_schema, full_dict["table"])
# Clean the table
if truncate:
msg_str = "Truncating table: {}".format(dbname)
print_function(msg_str)
query_str = "TRUNCATE TABLE {} CASCADE".format(dbname)
database.execute_transaction(query_str)
            # Drop columns not in the receiving table
if drop_missing:
actual_tables = database.get_column_names(full_dict["table"],
var_schema)
missing_set = set(df.columns) - set(actual_tables)
if missing_set:
cols_str = ", ".join(missing_set)
msg_str = ("Dropping extraneous columns: "
"{}").format(cols_str)
print_function(msg_str)
df = df.drop(missing_set, 1)
msg_str = "Writing to table: {}".format(dbname)
print_function(msg_str)
df.to_sql(full_dict["table"],
database._engine,
schema=var_schema,
if_exists="append",
index=False,
chunksize=50000)
del(df)
if full_dict["children"] is not None:
if full_dict["autokey"]:
tab_dirs = _list_dirs(root_path, full_dict["table"])
fids = [int(x.split(full_dict["table"])[1]) for x in tab_dirs]
if not tab_dirs: continue
first_dir = tab_dirs.pop(0)
first_fid = fids.pop(0)
child_path = os.path.join(root_path, first_dir)
database_from_files(child_path,
full_dict["children"],
database,
var_schema,
first_fid,
print_function=print_function)
for next_tab_dir, next_fid in zip(tab_dirs, fids):
child_path = os.path.join(root_path, next_tab_dir)
database_from_files(child_path,
full_dict["children"],
database,
var_schema,
next_fid,
False,
print_function=print_function)
else:
child_path = os.path.join(root_path, full_dict["table"])
database_from_files(child_path,
full_dict["children"],
database,
var_schema,
truncate=truncate,
print_function=print_function)
return
def query_builder(table_list,
pid_list=None,
fid_list=None,
where_list=None,
schema=None):
def _add_schema(table_name, schema=None):
if schema is None:
dbname = table_name
else:
dbname = "{}.{}".format(schema, table_name)
return dbname
consume_list = table_list[:]
table_name = _add_schema(consume_list.pop(), schema)
# No joins or wheres
if pid_list is None:
query_str = "SELECT * FROM {};".format(table_name)
return query_str
table_shorts = ["t{}".format(i) for i in xrange(len(table_list))]
consume_shorts = table_shorts[:]
table_short = consume_shorts.pop()
query_str = "SELECT {0}.*\nFROM {1} {0}".format(table_short, table_name)
consume_pid = pid_list[:]
# Add joins
if fid_list is not None:
consume_fid = fid_list[:]
while consume_list:
table_fid = consume_fid.pop()
join_table_pid = consume_pid.pop()
join_table_name = _add_schema(consume_list.pop(), schema)
join_table_short = consume_shorts.pop()
query_str += ("\nJOIN {0} {1} ON {1}.{2} = "
"{3}.{4}").format(join_table_name,
join_table_short,
join_table_pid,
table_short,
table_fid)
table_short = join_table_short
# Add wheres
if where_list is not None:
where_str = None
for where_dict in where_list:
table_short = table_shorts[where_dict["table#"]]
table_pid = pid_list[where_dict["table#"]]
pid_value = where_dict["value"]
eq_str = "{}.{} = {}".format(table_short,
table_pid,
pid_value)
if where_str is None:
where_str = "\nWHERE " + eq_str
else:
where_str += " AND " + eq_str
query_str += where_str
query_str += ";"
return query_str
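# Illustrative sketch (the table and key names below are assumptions, not taken
# from the actual schema): the last table in the list is the one selected and
# the joins walk back up through its ancestors, so a call such as
#
#   query_builder(["parent", "child"], pid_list=["id"], fid_list=["parent_id"])
#
# returns:
#
#   SELECT t1.*
#   FROM child t1
#   JOIN parent t0 ON t0.id = t1.parent_id;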
def convert_array(table_df, array_cols):
brackets = string.maketrans('[]','{}')
def _safe_square2curly(x):
if x is None:
return
else:
y = str(x).translate(brackets)
return y
for array_col in array_cols:
table_df[array_col] = table_df[array_col].apply(_safe_square2curly)
return table_df
def convert_bool(table_df, bool_cols):
def _safe_bool2str(x):
if x is None:
y = None
elif x:
y = "yes"
else:
y = "no"
return y
for bool_col in bool_cols:
table_df[bool_col] = table_df[bool_col].apply(_safe_bool2str)
return table_df
def convert_geo(table_df, geo_cols):
def _safe_to_wkt(x):
if x is None:
return
else:
geo_shape = wkb.loads(x, hex=True)
srid = geos.lgeos.GEOSGetSRID(geo_shape._geom)
if srid > 0:
result = "SRID={};{}".format(srid, geo_shape.wkt)
else:
result = geo_shape.wkt
return result
for geo_col in geo_cols:
table_df[geo_col] = table_df[geo_col].apply(_safe_to_wkt)
return table_df
def convert_time(table_df, time_cols):
def _safe_time2str(x):
if x is None:
return
else:
return x.strftime("%H:%M:%S")
for time_col in time_cols:
table_df[time_col] = table_df[time_col].apply(_safe_time2str)
return table_df
def check_dict(table_dict):
full_dict = {"array": None,
"autokey": False,
"bool": None,
"children": None,
"dummy": False,
"fkey": None,
"geo": None,
"pkey": None,
"schema": None,
"stripf": False,
"time": None}
full_dict.update(table_dict)
if "table" not in full_dict.keys():
errStr = ("Each definition requires a table name under the "
"'table' key.")
raise KeyError(errStr)
return full_dict
def get_table_map(map_name="table_map.yaml"):
# Load the yaml files
objdir = ObjDirectory(__name__, "..", "config")
table_yaml = objdir.get_path(map_name)
with open(table_yaml, "r") as f:
table_list = yaml.load(f, Loader=Loader)
return table_list
def filter_map(table_list, filter_name, parent=None):
copy_list = table_list[:]
for table_dict in copy_list:
full_dict = check_dict(table_dict)
table_name = full_dict["table"]
if filter_name == table_name:
if parent is not None:
parent["children"] = [full_dict]
return parent
else:
return table_dict
elif full_dict["children"] is not None:
result = filter_map(full_dict["children"],
filter_name,
table_dict)
if result is not None:
if parent is not None:
parent["children"] = [result]
return parent
else:
return result
return None
def draw_map(table_list, level=0):
map_str = ""
for table_dict in table_list:
full_dict = check_dict(table_dict)
if level > 0:
n_spaces = 2 * level - 1
level_marks = " " + " " * n_spaces + "|"
else:
level_marks = "|"
level_marks += "-"
map_str += "{} {}\n".format(level_marks, full_dict["table"])
if full_dict["children"] is not None:
map_str += draw_map(full_dict["children"], level + 1)
return map_str
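# Illustrative sketch (table names are assumptions): for a map such as
#
#   [{"table": "parent", "children": [{"table": "child"}]}]
#
# draw_map returns the indented tree
#
#   |- parent
#     |- child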
def get_database_config(db_config_name="database.yaml"):
userconfigdir = UserDataDirectory("dtocean_core", "DTOcean", "config")
useryaml = ReadYAML(userconfigdir, db_config_name)
if userconfigdir.isfile(db_config_name):
configdir = userconfigdir
else:
configdir = ObjDirectory("dtocean_core", "config")
configyaml = ReadYAML(configdir, db_config_name)
config = configyaml.read()
return useryaml, config
def get_database(credentials,
echo=False,
timeout=None,
db_adapter="psycopg2"):
database = PostgreSQL(db_adapter)
database.set_credentials(credentials)
database.set_echo(echo)
database.set_timeout(timeout)
database.configure()
return database
def database_convert_parser():
'''Command line parser for database_to_files and database_from_files.
'''
desStr = "Convert DTOcean database to and from structured files"
epiStr = "Mathew Topper, Data Only Greater, (c) 2018"
parser = argparse.ArgumentParser(description=desStr,
epilog=epiStr,
formatter_class=SmartFormatter)
parser.add_argument("action",
choices=['dump', 'load', 'list', 'view', 'dir'],
help="R|Select an action, where\n"
" dump = export database to files\n"
" load = import files into database\n"
" list = print stored credentials identifiers\n"
" view = print stored credentials (using -i "
"option)\n"
" dir = print table structure")
parser.add_argument("-d", "--directory",
help=("directory to add or read files from. "
"Defaults to '.'"),
type=str,
default=".")
parser.add_argument("-s", "--section",
choices=['device', 'site', 'other'],
help="R|Only operate on section from\n"
" device = tables related to the OEC\n"
" site = tables related to the deployment site\n"
" other = tables related to the reference data")
parser.add_argument("-i", "--identifier",
help=("stored credentials identifier"),
type=str)
parser.add_argument("--host",
help=("override database host"),
type=str)
parser.add_argument("--name",
help=("override database name"),
type=str)
parser.add_argument("--user",
help=("override database username"),
type=str)
parser.add_argument("-p", "--pwd",
help=("override database password"),
type=str)
args = parser.parse_args()
result = {"action": args.action,
"root_path": args.directory,
"filter_table": args.section,
"db_id": args.identifier,
"db_host": args.host,
"db_name": args.name,
"db_user": args.user,
"db_pwd": args.pwd}
return result
def database_convert_interface():
'''Command line interface for database_to_files and database_from_files.
Example:
To get help::
$ dtocean-database -h
'''
request = database_convert_parser()
_, config = get_database_config()
# List the available database configurations
if request["action"] == "list":
id_str = ", ".join(config.keys())
if id_str:
msg_str = ("Available database configuration identifiers are: "
"{}").format(id_str)
else:
msg_str = "No database configurations are stored"
print(msg_str)
return
if request["action"] == "view":
if request["db_id"] is None:
err_msg = "Option '-i' must be specified with 'view' action"
raise ValueError(err_msg)
cred = config[request["db_id"]]
for k, v in cred.iteritems():
print('{:>8} :: {}'.format(k, v))
return
table_list = get_table_map()
# Filter the table if required
if request["filter_table"] is not None:
filtered_dict = filter_map(table_list, request["filter_table"])
table_list = [filtered_dict]
if request["action"] == "dir":
print("\n" + draw_map(table_list))
return
# Set up the DB
if request["db_id"] is not None:
cred = config[request["db_id"]]
else:
cred = {"host": None,
"dbname": None,
"user": None,
"pwd": None}
if request["db_host"] is not None:
cred["host"] = request["db_host"]
if request["db_name"] is not None:
cred["dbname"] = request["db_name"]
if request["db_user"] is not None:
cred["user"] = request["db_user"]
if request["db_pwd"] is not None:
cred["pwd"] = "postgres"
db = get_database(cred, timeout=60)
if request["action"] == "dump":
# make a directory if required
if not os.path.exists(request["root_path"]):
os.makedirs(request["root_path"])
database_to_files(request["root_path"],
table_list,
db)
return
if request["action"] == "load":
database_from_files(request["root_path"],
table_list,
db)
return
raise RuntimeError("Highly illogical...")
return
| gpl-3.0 |
ablifedev/ABLIRC | ABLIRC/bin/Clip-Seq/ABLIFE/cal_nonintergenic_region.py | 1 | 10587 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
####################################################################################
### Copyright (C) 2015-2019 by ABLIFE
####################################################################################
####################################################################################
####################################################################################
# Date Version Author ChangeLog
#
#
#####################################################################################
"""
Program functionality:
1. Calculate gene expression levels
2. randCheck_gene
3. randCheck_mRNA
Program design:
Use the gffutils and HTSeq packages to compute the statistics
"""
import re, os, sys, logging, time, datetime
from optparse import OptionParser, OptionGroup
reload(sys)
sys.setdefaultencoding('utf-8')
import subprocess
import threading
import gffutils
import HTSeq
import numpy
import multiprocessing
import signal
from matplotlib import pyplot
sys.path.insert(1, os.path.split(os.path.realpath(__file__))[0] + "/../../")
# print(sys.path)
from ablib.utils.tools import *
if sys.version_info < (2, 7):
print("Python Version error: please use phthon2.7")
sys.exit(-1)
_version = 'v0.1'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def configOpt():
"""Init for option
"""
usage = 'Usage: %prog [-f] [other option] [-h]'
p = OptionParser(usage)
##basic options
    p.add_option('-g', '--gff', dest='gff', action='store', type='string', help='gff file, do not have to provide it if db already exists')
p.add_option('-i', '--info', dest='info', action='store', type='string', help='chr length info')
p.add_option('-d', '--db', dest='db', default='gffdb', action='store', type='string', help='the gff database file to create or use')
p.add_option('-o', '--outfile', dest='outfile', default='intergenic.txt', action='store', type='string', help='intergenic.txt')
group = OptionGroup(p, "Preset options")
##preset options
group.add_option('-O', '--outDir', dest='outDir', default='./', action='store', type='string', help='output directory', metavar="DIR")
group.add_option('-L', '--logDir', dest='logDir', default='', action='store', type='string', help='log dir ,default is same as outDir')
group.add_option('-P', '--logPrefix', dest='logPrefix', default='', action='store', type='string', help='log file prefix')
group.add_option('-E', '--email', dest='email', default='none', action='store', type='string', help='email address, if you want get a email when this job is finished,default is no email', metavar="EMAIL")
group.add_option('-Q', '--quiet', dest='quiet', default=False, action='store_true', help='do not print messages to stdout')
group.add_option('-K', '--keepTemp', dest='keepTemp', default=False, action='store_true', help='keep temp dir')
group.add_option('-T', '--test', dest='isTest', default=False, action='store_true', help='run this program for test')
p.add_option_group(group)
if len(sys.argv) == 1:
p.print_help()
sys.exit(1)
opt, args = p.parse_args()
return (p, opt, args)
def listToString(x):
"""获得完整的命令
"""
rVal = ''
for a in x:
rVal += a + ' '
return rVal
opt_parser, opt, args = configOpt()
if opt.logDir == "":
opt.logDir = opt.outDir + '/log/'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
scriptPath = os.path.abspath(os.path.dirname(__file__)) # absolute script path
binPath = scriptPath + '/bin' # absolute bin path
outPath = os.path.abspath(opt.outDir) # absolute output path
os.mkdir(outPath) if not os.path.isdir(outPath) else None
logPath = os.path.abspath(opt.logDir)
os.mkdir(logPath) if not os.path.isdir(logPath) else None
tempPath = outPath + '/temp/' # absolute bin path
# os.mkdir(tempPath) if not os.path.isdir(tempPath) else None
resultPath = outPath + '/result/'
# os.mkdir(resultPath) if not os.path.isdir(resultPath) else None
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def initLogging(logFilename):
"""Init for logging
"""
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s : %(levelname)s] %(message)s', datefmt='%y-%m-%d %H:%M', filename=logFilename, filemode='w')
if not opt.quiet:
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('[%(asctime)s : %(levelname)s] %(message)s', datefmt='%y-%m-%d %H:%M')
# tell the handler to use this format
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
dt = datetime.datetime.now()
logFile = logPath + '/' + opt.logPrefix + 'log.' + str(dt.strftime('%Y%m%d.%H%M%S.%f')) + '.txt'
initLogging(logFile)
logging.debug(sys.modules[__name__].__doc__)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug('Program version: %s' % _version)
logging.debug('Start the program with [%s]\n', listToString(sys.argv))
startTime = datetime.datetime.now()
logging.debug("计时器:Program start at %s" % startTime)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### S
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### E
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def main():
print("Main procedure start...")
if opt.gff:
db = gffutils.create_db(opt.gff, opt.db, merge_strategy="create_unique", verbose=False, force=True)
db = gffutils.FeatureDB(opt.db)
ga = HTSeq.GenomicArray("auto", stranded=True, typecode='i')
genes = ('gene','lincRNA_gene','miRNA_gene','mt_gene','processed_pseudogene','pseudogene','rRNA_gene','snoRNA_gene','snRNA_gene')
for gene in db.features_of_type(genes):
gu_iv = HTSeq.GenomicInterval(gene.seqid, gene.start - 1, gene.end, gene.strand)
ga[gu_iv]=1
with open(opt.outfile, 'w') as o:
for line in open(opt.info):
if line.startswith('#'): continue
if line.startswith('\n'): continue
line = line.strip().split('\t')
ext_iv = HTSeq.GenomicInterval(line[0], 0, int(line[1]), "+")
for iv, value in ga[ext_iv].steps():
if value == 1:
o.writelines(line[0] + '\t' + str(iv.start+1) + '\t' + str(iv.end) + '\tnoninter\t0\t' + '+' + '\n')
ext_iv = HTSeq.GenomicInterval(line[0], 0, int(line[1]), "-")
for iv, value in ga[ext_iv].steps():
if value == 1:
o.writelines(line[0] + '\t' + str(iv.start+1) + '\t' + str(iv.end) + '\tnoninter\t0\t' + '-' + '\n')
if __name__ == '__main__':
main()
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if not opt.keepTemp:
os.system('rm -rf ' + tempPath)
logging.debug("Temp folder is deleted..")
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug("Program ended")
currentTime = datetime.datetime.now()
runningTime = (currentTime - startTime).seconds # in seconds
logging.debug("计时器:Program start at %s" % startTime)
logging.debug("计时器:Program end at %s" % currentTime)
logging.debug("计时器:Program ran %.2d:%.2d:%.2d" % (runningTime / 3600, (runningTime % 3600) / 60, runningTime % 60))
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
if opt.email != "none":
run_cmd = listToString(sys.argv)
sendEmail(opt.email, str(startTime), str(currentTime), run_cmd, outPath)
logging.info("发送邮件通知到 %s" % opt.email)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
| mit |
runnersaw/Discrete-Hash-2015 | hash_function.py | 1 | 10801 | # By Sawyer, Griffin, and Rahil
# Custom hashing function for Olin College Discrete Math 2015
import sys
import math
import bitstring
from bitstring import BitArray, BitStream, ConstBitStream
from Crypto.Hash import MD5
from random import randint
import numpy
import matplotlib.pyplot as plt
def padBits(input, bitSize):
"""
    This function will pad our input to a multiple of the block size by
    appending a single '1' bit followed by '0' bits (no length field is added)
"""
# if 0 bits, just return a block with 1000000000
if len(input.bin) == 0:
command = 'bin:1=1, bin:' + str(bitSize-1) + '=' + '0'*(bitSize-1)
return bitstring.pack(command)
    # if the input length is already a multiple of the block size, a whole
    # extra block of padding is appended
length = len(input.bin)
remainder = length % bitSize
remainder = bitSize - remainder
if remainder == 0:
remainder = bitSize
zeroBits = remainder - 1
zeros = '0'*zeroBits
command = 'bin:'+str(length)+'='+input.bin+', bin:1=1, bin:' + str(zeroBits) + '=' + zeros
return bitstring.pack(command)
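# A worked example of the padding above (illustrative values): with an 8-bit
# block size a 3-bit input '101' gets a single '1' bit and then zeros appended
# up to the block boundary, e.g.
#
#   padBits(BitArray('0b101'), 8).bin  ->  '10110000'
#
# If the input length is already a multiple of the block size, a whole extra
# block ('1' followed by bitSize-1 zeros) is appended.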
def leftShift(x,n):
"""
    This function performs a circular left shift (rotate): the bit shifted out
    at the top end is re-inserted at the bottom end
"""
for i in range(n):
bits = x.bin
finalBit = bits[0]
leftShiftedBits = (x<<1).bin
leftShiftedBits = leftShiftedBits[:-1] + finalBit
x = BitArray('0b'+leftShiftedBits)
return x
def rightShift(x,n):
"""
    This function performs a circular right shift (rotate): the bit shifted out
    at the bottom end is re-inserted at the top end
"""
for i in range(n):
bits = x.bin
finalBit = bits[len(bits)-1]
rightShiftedBits = (x>>1).bin
rightShiftedBits = finalBit + rightShiftedBits[1:]
x = BitArray('0b'+rightShiftedBits)
return x
def add(x,y,bits):
'''
We had to write a function to add two BitArrays because the '+' operator just concats them
'''
a = (x.uint + y.uint) % (2**bits)
return BitArray("uint:"+str(bits)+"="+str(a))
def truncate(x,n):
'''
This function takes a BitArray and truncates it to n bits
'''
bits = x.bin
newBits = bits[-n:]
return BitArray('0b'+newBits)
def j(x,y,z):
"""
    Round mixing function used inside of our compression
it should be (x v y) XOR (~z ^ y)
"""
a = (x | y) ^ (~z & y)
return a
def k(x,y,z):
"""
    Round mixing function used inside our compressionFunction
    it should be (~x XOR y) v (z XOR ~x)
"""
a = (~x ^ y) | (z ^ ~x)
return a
def compressionFunction(input1, input2, bitSize):
"""
This function runs our compression function. The portion of code inside the
main for loop is our round function e(), which is run on input2, which is the
current block. The round function is looped through by the number of rounds
specified in the function (64 in this case). The round function utilizes addition,
shifts, and two functions j() and k(). At the end, the output of the round
function() is XORed with input1, which is the hashed version of the previous
block. The output of the XOR operation is returned by the function.
The bitSize input is necessary to split each block into four sub-blocks of the
correct size.
"""
alpha = 'abcd'
subBitSize = bitSize / 4
rounds = 64
for x in range(rounds):
blocks = {}
newBlocks = {}
for y in range(4):
blocks[alpha[y]] = input2[y*subBitSize:y*subBitSize+subBitSize]
shiftSize = subBitSize / 2 - 1
a_j = j(blocks['a'], blocks['b'], blocks['c'])
a_k = k(blocks['a'], a_j, blocks['d'])
newBlocks['a'] = add(a_k, blocks['b'], subBitSize)
newBlocks['b'] = blocks['a']
newBlocks['c'] = leftShift(blocks['d'], shiftSize)
newBlocks['d'] = add(blocks['b'], blocks['c'], subBitSize)
for z in range(4):
input2[z*subBitSize:z*subBitSize+subBitSize] = newBlocks[alpha[z]]
output = input1 ^ input2
return output
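# A minimal usage sketch (values are illustrative and the resulting digest is
# not shown, since it depends on all 64 rounds): both inputs must be BitArrays
# of bitSize bits, with bitSize divisible by 4 so the block splits into four
# equal sub-blocks.
#
#   state = BitArray('0x0d84fee0')                    # 32-bit chaining value
#   block = BitArray('0x44697363')                    # 32-bit message block
#   digest = compressionFunction(state, block, 32)    # 32-bit BitArray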
def merkle(messageBlock, bitSize, initialValue, padFunction, compressionFunction):
"""
The merkle calls our compression function multiple times
once for each message block
"""
# pad the bits
messageBlock = padFunction(messageBlock, bitSize)
#setup
prevState = initialValue
# loop through messages
numMessages = len(messageBlock.bin)/bitSize
for i in range(numMessages):
shortMessage = messageBlock[bitSize*i:bitSize*(i+1)] # get current message
prevState = compressionFunction(prevState, shortMessage, bitSize) # call compressionFunction
return prevState
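# Illustrative trace of the construction above (sizes are assumptions): with
# bitSize = 8, a 12-bit message is padded by padBits to 16 bits ('1' plus
# three '0' bits appended), giving two 8-bit blocks, so compressionFunction is
# called twice, each time chaining on the previous output.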
def runMerkle(hashInput):
"""
This just runs the merkle given a certain input. It uses all of the global variables
defined in main to run the merkle function
"""
return merkle(hashInput, bitSize, iv, padBits, compressionFunction)
def percentSimilar(a,b):
'''
Returns the percentage of bits that are the same in a and b
'''
if len(a) != len(b):
print("Input same size numbers")
return
count = 0
for i in range(len(a)):
if (a[i] == b[i]):
count+=1
return float(count) / len(a)
def avalanche_test_compression(iters, bitSize):
"""
This function will test whether a given compression function produces good
avalanche effect. To do this we'll change one bit at random, roughly 50%
of the output bits should flip. In order to test this, we'll generate
a bunch of random bitstrings, pick random bit to flip for each one,
run the compression function, and do this many times in a row for each
bitstring. At the end we'll monitor the average % of bits that flipped,
as well as the minimum % and maximum % flipped
Inputs: iters = number of iterations to run
bitSize = the size of the input, please make this a power of 2
"""
similarPercents = []
prevState = BitArray('0b'+make_random_bitstring(bitSize))
#short array will be the same every time
shortMessage = BitArray('0b'+make_random_bitstring(bitSize))
#however many iterations of compression we want to do
for i in range(0,iters):
#now run compression on it
new_message = compressionFunction(prevState, shortMessage, bitSize)
#check how similar they are
percentSim = percentSimilar(new_message, prevState)
#add the percent similar to our list
similarPercents.append(percentSim)
#make the prev state the new message
prevState = new_message
#print similarPercents
print "compression avalanche percent for " + str(iters) + " tests is: "+str(numpy.mean(similarPercents))
return
def avalanche_test_merkle(iters):
"""
Run avalanche testing with our full merkle function, not just compression
"""
print "running merkle avalanche test"
similarPercents = []
for i in range(0,iters-1):
first_bitstring = BitArray('0b'+make_random_bitstring(bitSize))
flipped_first = flip_random_bit(first_bitstring)
interim_percent = percentSimilar(runMerkle(first_bitstring), runMerkle(flipped_first))
similarPercents.append(interim_percent)
print "merkle avalanche overall percent similar for " + str(iters) + " tests (custom merkle) is: " + str(numpy.mean(similarPercents))
print "merkle standard deviation for avalanche values (custom merkle) is: " + str(numpy.std(similarPercents))
#make a histogram of the data
plt.hist(similarPercents)
plt.title("Histogram of custom hash avalanche values")
plt.xlabel("Percent Similar")
plt.ylabel("Frequency")
plt.show()
print "merkle avalanche testing done"
def md5_bitstring_run(input_bitstring):
md5test = MD5.new()
    # hash the bit pattern of the supplied bitstring (previously a fresh random
    # string was hashed, which ignored the input_bitstring argument)
    md5test.update(input_bitstring.bin)
md5_hex = md5test.hexdigest()
md5_bitstring = BitArray('0x'+md5_hex)
return md5_bitstring
def avalanche_test_md5(iters):
"""
    run the same avalanche test, but with the md5 algorithm so that we can compare
to our custom algorithm
"""
print "running md5 avalanche test"
similarPercents = []
for i in range(0,iters-1):
first_bitstring = BitArray('0b'+make_random_bitstring(bitSize))
flipped_first = flip_random_bit(first_bitstring)
interim_percent = percentSimilar(md5_bitstring_run(first_bitstring), md5_bitstring_run(flipped_first))
similarPercents.append(interim_percent)
print "merkle avalanche overall percent similar for " + str(iters) + " tests (md5) is: " + str(numpy.mean(similarPercents))
print "merkle standard deviation for avalanche values (md5) is: " + str(numpy.std(similarPercents))
#make a histogram of the data
plt.hist(similarPercents)
plt.title("Histogram of custom hash avalanche values (md5)")
plt.xlabel("Percent Similar")
plt.ylabel("Frequency")
plt.show()
print "merkle avalanche testing done"
def flip_random_bit(first_bitstring):
"""
Selects a random bit from a bitstring and flips its value
"""
bits = first_bitstring.bin
flip_bit_index = randint(0,len(bits)-1)
new_bitstring = bits[0:flip_bit_index]
if first_bitstring[flip_bit_index]==0:
new_bitstring += '1'
else:
new_bitstring += '0'
new_bitstring += bits[flip_bit_index+1:]
return BitArray('0b'+new_bitstring)
def make_random_bitstring(length):
"""
    Returns a string of bits of the given length
    you'll need to convert it to a BitArray
"""
output_bitstring = "0"
for i in range(0,length-1):
#make a randint every time
output_bitstring += `randint(0,1)`
return output_bitstring
def collisionTest(digits):
'''
    This function iterates through all possible inputs of up to the given number of binary digits
    It saves each digest in a dictionary and reports any collisions found
'''
collisionDict = {}
numCollisions = 0
for i in range(digits):
numDigits = i+1
for j in range(2**numDigits):
hashInput = BitArray('uint:'+str(numDigits)+'='+str(j))
out = runMerkle(hashInput)
bin = out.bin
if out.bin in collisionDict:
collisionDict[out.bin][0] += 1
collisionDict[out.bin][1].append(hashInput.bin)
print("COLLISION")
numCollisions += 1
for i in range(len(collisionDict[out.bin][1])):
print(collisionDict[out.bin][1][i])
else:
collisionDict[out.bin] = [1, [hashInput.bin]]
print("Number collisions: "+str(numCollisions))
if __name__=="__main__":
bitSize = 32
iv = BitArray('0x0d84fee0')
avalanche_test_compression(100, bitSize)
avalanche_test_merkle(100)
avalanche_test_md5(100)
hashInput = BitArray('0x446973637265746520697320617765736f6d6521')
| mit |
joshlk/blosc_store | blosc_store/blst.py | 1 | 4455 | #!/usr/bin/env python
import os
import shutil
import pandas as pd
import bloscpack as bp
try:
import ujson as json
except:
import json
"""
For reading and writing to the blosc store blst format. Nuances that users should take note:
* Column names are always saved as strings irrespective of there original data-type (e.g. could be int)
* DataFrame index is currently not preserved
* Datatypes currently supported: strings, numerical, datetime, categorical (with string and numeric)
"""
def to_blst(df, path):
"""
Save a DataFrame using blst format
:param df: DataFrame to save
:type df: pandas.DataFrame
:param path: The path to save to the blst store to. It uses a directory but you are still recommended to use a
'.blst' extension
:type path: str
:return:
"""
# Ensure not multi-index
if isinstance(df.columns, pd.MultiIndex):
raise NotImplementedError("MultiIndex columns not supported")
# Delete directory if already exists and make folder
if os.path.isdir(path):
shutil.rmtree(path)
os.makedirs(path)
# Save column file
column_meta_file = os.path.join(path, 'columns.txt')
column_meta = df.dtypes.reset_index()
column_meta.columns = ['col', 'dtype']
column_meta.to_csv(column_meta_file, sep='\t', index=False)
# Save each column
for col in df.columns:
dtype = str(df.dtypes[col])
if dtype == 'object': # String
file = os.path.join(path, "{}.csv".format(col))
df[[col]].to_csv(file, index=False, header=False)
elif dtype == 'category':
meta_file = os.path.join(path, "{}.meta.json".format(col))
file = os.path.join(path, "{}.bp".format(col))
# Meta file which contains the categories
meta = {
'categories': df[col].cat.categories.tolist(),
'ordered': df[col].cat.ordered
}
with open(meta_file, 'w') as f:
json.dump(meta, f)
bp.pack_ndarray_file(df[col].cat.codes.values, file)
else: # Numeric and datetime dtype
file = os.path.join(path, "{}.bp".format(col))
bp.pack_ndarray_file(df[col].values, file)
def read_blst_columns(path):
"""
Read the columns and datatype of a blst store
:param path: Path to blst store
:return: DataFrame of columns and dtypes
"""
column_meta_file = os.path.join(path, 'columns.txt')
column_meta = pd.read_table(column_meta_file)
return column_meta
def read_blst(path, columns='ALL'):
"""
Read a blst data store and return a DataFrame
:param path: Path to blst data store. Give the directory location
:param columns: Which columns to read and in which order. Give 'ALL' to read all columns.
:return: Read data
:rtype: pandas.DataFrame
"""
# Check path
if not os.path.isdir(path):
raise IOError("Folder does not exist: {}".format(path))
# Read the columns
column_meta = read_blst_columns(path)
column_meta_dict = column_meta.set_index('col')['dtype'].to_dict()
# Check columns integrity
if columns != 'ALL':
for col in columns:
if col not in column_meta_dict:
raise KeyError("'{}' not a column".format(col))
else:
columns = column_meta['col']
# Read each column
for i, col in enumerate(columns):
dtype = column_meta_dict[col]
if dtype == 'object': # String
file = os.path.join(path, "{}.csv".format(col))
col_df = pd.read_csv(file, header=None, names=[col])
elif dtype == 'category':
meta_file = os.path.join(path, "{}.meta.json".format(col))
file = os.path.join(path, "{}.bp".format(col))
with open(meta_file, 'r') as f:
meta = json.load(f)
col_df = bp.unpack_ndarray_file(file)
col_df = pd.Categorical.from_codes(col_df, meta['categories'],
ordered=meta['ordered'])
col_df = pd.Series(col_df, name=col).to_frame()
else: # Numeric and datetime dtype
file = os.path.join(path, "{}.bp".format(col))
col_df = bp.unpack_ndarray_file(file)
col_df = pd.Series(col_df, name=col).to_frame()
if i == 0:
df = col_df
else:
df[col] = col_df
return df
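# A minimal round-trip sketch (the DataFrame contents and the 'example.blst'
# path are illustrative assumptions):
if __name__ == "__main__":
    # one string column and one numeric column, written out and read back
    demo_df = pd.DataFrame({"name": ["a", "b"], "value": [1.0, 2.0]})
    to_blst(demo_df, "example.blst")
    restored = read_blst("example.blst")
    print(restored.dtypes)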
| mit |
dwysocki/ASTP-601-602 | presentations/fourier-teaser/py/plot.py | 2 | 1520 | #!/usr/bin/env python3
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
def main():
if len(sys.argv) != 5:
print("Usage: plot [phot file] [period] [star name] [output file]",
file=sys.stderr)
exit()
filename, period, name, output = sys.argv[1:]
period = float(period)
data = np.loadtxt(filename)
plot_lightcurve(name, data, period, output)
def plot_lightcurve(name, data, period, output):
# initialize Figure and Axes objects
fig, ax = plt.subplots(figsize=(6.97, 4.31))
# format the x- and y-axis
ax.invert_yaxis()
if period > 0:
ax.set_xlim(0,2)
# Plot points used
time, mag, error = data.T
if period > 0:
time = (time/period) % 1.0
time = np.hstack((time,1+time))
mag = np.hstack((mag,mag))
error = np.hstack((error,error))
points = ax.errorbar(time, mag, yerr=error,
color="darkblue",
ls='None',
ms=.01, mew=.01, capsize=0)
if period > 0:
ax.set_xlabel('Phase ({0:0.7} day period)'.format(period))
else:
ax.set_xlabel('Time (days)')
ax.set_ylabel('Magnitude')
ax.xaxis.set_minor_locator(AutoMinorLocator(5))
ax.yaxis.set_minor_locator(AutoMinorLocator(5))
ax.set_title(name)
fig.tight_layout(pad=0.1)
fig.savefig(output)
plt.close(fig)
if __name__ == "__main__":
exit(main())
| mit |
sanketloke/scikit-learn | sklearn/svm/classes.py | 3 | 40654 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to ``coef_``
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
``"ovr"`` trains n_classes one-vs-rest classifiers, while ``"crammer_singer"``
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If ``"crammer_singer"`` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
``[x, self.intercept_scaling]``,
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to ``class_weight[i]*C`` for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
``coef_`` is a readonly property derived from ``raw_coef_`` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation, liblinear, uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
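# A minimal usage sketch with synthetic, illustrative data:
#
#   from sklearn.svm import LinearSVC
#   X = [[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]
#   y = [0, 0, 1, 1]
#   clf = LinearSVC(C=1.0).fit(X, y)
#   clf.predict([[2.5, 2.5]])     # a point near the second cluster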
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard
        SVR loss while 'squared_epsilon_insensitive' is the squared version.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set ``epsilon=0``.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change 'ovr'
in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies as
``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change 'ovr'
in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
        Coefficients of the support vectors in the decision function.
        For multiclass, coefficients for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
        with points predicted within a distance epsilon of the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
        Coefficients of the support vectors in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
        Coefficients of the support vectors in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
        Epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
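    Examples
    --------
    A minimal usage sketch on toy data (the values below are arbitrary and
    the exact labels depend on the fitted model; predictions are typically
    +1 for inliers and -1 for outliers):

    >>> import numpy as np
    >>> from sklearn.svm import OneClassSVM
    >>> X = np.array([[0., 0.], [0.1, 0.], [0., 0.1], [5., 5.]])
    >>> clf = OneClassSVM(nu=0.1, gamma=0.5)
    >>> _ = clf.fit(X)
    >>> labels = clf.predict(X)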
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
        super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)),
                                     sample_weight=sample_weight, **params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
        dec : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
gengliangwang/spark | python/run-tests.py | 15 | 13614 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from argparse import ArgumentParser
import os
import re
import shutil
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
import queue as Queue
from multiprocessing import Manager
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
print('\033[31m' + text + '\033[0m')
SKIPPED_TESTS = None
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
# TODO: revisit for Scala 2.13
for scala in ["2.12"]:
build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
if os.path.isdir(build_dir):
SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
break
else:
raise RuntimeError("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(target_dir, test_name, pyspark_python):
env = dict(os.environ)
env.update({
'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
'SPARK_TESTING': '1',
'SPARK_PREPEND_CLASSES': '1',
'PYSPARK_PYTHON': which(pyspark_python),
'PYSPARK_DRIVER_PYTHON': which(pyspark_python),
# Preserve legacy nested timezone behavior for pyarrow>=2, remove after SPARK-32285
'PYARROW_IGNORE_TIMEZONE': '1',
})
# Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
# recognized by the tempfile module to override the default system temp directory.
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
while os.path.isdir(tmp_dir):
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
os.mkdir(tmp_dir)
env["TMPDIR"] = tmp_dir
metastore_dir = os.path.join(tmp_dir, str(uuid.uuid4()))
while os.path.isdir(metastore_dir):
metastore_dir = os.path.join(metastore_dir, str(uuid.uuid4()))
os.mkdir(metastore_dir)
# Also override the JVM's temp directory by setting driver and executor options.
java_options = "-Djava.io.tmpdir={0} -Dio.netty.tryReflectionSetAccessible=true".format(tmp_dir)
spark_args = [
"--conf", "spark.driver.extraJavaOptions='{0}'".format(java_options),
"--conf", "spark.executor.extraJavaOptions='{0}'".format(java_options),
"--conf", "spark.sql.warehouse.dir='{0}'".format(metastore_dir),
"pyspark-shell"
]
env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
start_time = time.time()
try:
per_test_output = tempfile.TemporaryFile()
retcode = subprocess.Popen(
[os.path.join(SPARK_HOME, "bin/pyspark")] + test_name.split(),
stderr=per_test_output, stdout=per_test_output, env=env).wait()
shutil.rmtree(tmp_dir, ignore_errors=True)
except:
LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(1)
duration = time.time() - start_time
# Exit on the first failure.
if retcode != 0:
try:
with FAILURE_REPORTING_LOCK:
with open(LOG_FILE, 'ab') as log_file:
per_test_output.seek(0)
log_file.writelines(per_test_output)
per_test_output.seek(0)
for line in per_test_output:
decoded_line = line.decode("utf-8", "replace")
if not re.match('[0-9]+', decoded_line):
print(decoded_line, end='')
per_test_output.close()
except:
LOGGER.exception("Got an exception while trying to print failed test output")
finally:
print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
else:
skipped_counts = 0
try:
per_test_output.seek(0)
# Here expects skipped test output from unittest when verbosity level is
# 2 (or --verbose option is enabled).
decoded_lines = map(lambda line: line.decode("utf-8", "replace"), iter(per_test_output))
skipped_tests = list(filter(
lambda line: re.search(r'test_.* \(pyspark\..*\) ... (skip|SKIP)', line),
decoded_lines))
skipped_counts = len(skipped_tests)
if skipped_counts > 0:
key = (pyspark_python, test_name)
assert SKIPPED_TESTS is not None
SKIPPED_TESTS[key] = skipped_tests
per_test_output.close()
except:
import traceback
print_red("\nGot an exception while trying to store "
"skipped test output:\n%s" % traceback.format_exc())
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
if skipped_counts != 0:
LOGGER.info(
"Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python, test_name,
duration, skipped_counts)
else:
LOGGER.info(
"Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
python_execs = [x for x in ["python3.6", "pypy3"] if which(x)]
if "python3.6" not in python_execs:
p = which("python3")
if not p:
LOGGER.error("No python3 executable found. Exiting!")
os._exit(1)
else:
python_execs.insert(0, p)
return python_execs
def parse_opts():
parser = ArgumentParser(
prog="run-tests"
)
parser.add_argument(
"--python-executables", type=str, default=','.join(get_default_python_executables()),
help="A comma-separated list of Python executables to test against (default: %(default)s)"
)
parser.add_argument(
"--modules", type=str,
default=",".join(sorted(python_modules.keys())),
help="A comma-separated list of Python modules to test (default: %(default)s)"
)
parser.add_argument(
"-p", "--parallelism", type=int, default=4,
help="The number of suites to test in parallel (default %(default)d)"
)
parser.add_argument(
"--verbose", action="store_true",
help="Enable additional debug logging"
)
group = parser.add_argument_group("Developer Options")
group.add_argument(
"--testnames", type=str,
default=None,
help=(
"A comma-separated list of specific modules, classes and functions of doctest "
"or unittest to test. "
"For example, 'pyspark.sql.foo' to run the module as unittests or doctests, "
"'pyspark.sql.tests FooTests' to run the specific class of unittests, "
"'pyspark.sql.tests FooTests.test_foo' to run the specific unittest in the class. "
"'--modules' option is ignored if they are given.")
)
args, unknown = parser.parse_known_args()
if unknown:
parser.error("Unsupported arguments: %s" % ' '.join(unknown))
if args.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return args
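# Example invocations (illustrative only; module names come from
# dev/sparktestsupport/modules.py and the available interpreters depend on the
# local environment):
#
#   ./python/run-tests.py --modules=pyspark-sql --parallelism=8
#   ./python/run-tests.py --testnames='pyspark.sql.tests FooTests.test_foo'
#   ./python/run-tests.py --python-executables=python3.6 --verbose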
def _check_coverage(python_exec):
    # Make sure coverage is installed.
try:
subprocess_check_output(
[python_exec, "-c", "import coverage"],
stderr=open(os.devnull, 'w'))
except:
print_red("Coverage is not installed in Python executable '%s' "
"but 'COVERAGE_PROCESS_START' environment variable is set, "
"exiting." % python_exec)
sys.exit(-1)
def main():
opts = parse_opts()
if opts.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
should_test_modules = opts.testnames is None
logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
python_execs = opts.python_executables.split(',')
LOGGER.info("Will test against the following Python executables: %s", python_execs)
if should_test_modules:
modules_to_test = []
for module_name in opts.modules.split(','):
if module_name in python_modules:
modules_to_test.append(python_modules[module_name])
else:
print("Error: unrecognized module '%s'. Supported modules: %s" %
(module_name, ", ".join(python_modules)))
sys.exit(-1)
LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
else:
testnames_to_test = opts.testnames.split(',')
LOGGER.info("Will test the following Python tests: %s", testnames_to_test)
task_queue = Queue.PriorityQueue()
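    # PriorityQueue pops the smallest priority first, so the entries queued
    # with priority 0 below (the long-running suites) start before the
    # priority-100 ones and overlap with the quicker tests.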
for python_exec in python_execs:
# Check if the python executable has coverage installed when 'COVERAGE_PROCESS_START'
# environmental variable is set.
if "COVERAGE_PROCESS_START" in os.environ:
_check_coverage(python_exec)
python_implementation = subprocess_check_output(
[python_exec, "-c", "import platform; print(platform.python_implementation())"],
universal_newlines=True).strip()
LOGGER.info("%s python_implementation is %s", python_exec, python_implementation)
LOGGER.info("%s version is: %s", python_exec, subprocess_check_output(
[python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
if should_test_modules:
for module in modules_to_test:
if python_implementation not in module.excluded_python_implementations:
for test_goal in module.python_test_goals:
heavy_tests = ['pyspark.streaming.tests', 'pyspark.mllib.tests',
'pyspark.tests', 'pyspark.sql.tests', 'pyspark.ml.tests',
'pyspark.pandas.tests']
if any(map(lambda prefix: test_goal.startswith(prefix), heavy_tests)):
priority = 0
else:
priority = 100
task_queue.put((priority, (python_exec, test_goal)))
else:
for test_goal in testnames_to_test:
task_queue.put((0, (python_exec, test_goal)))
# Create the target directory before starting tasks to avoid races.
target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
def process_queue(task_queue):
while True:
try:
(priority, (python_exec, test_goal)) = task_queue.get_nowait()
except Queue.Empty:
break
try:
run_individual_python_test(target_dir, test_goal, python_exec)
finally:
task_queue.task_done()
start_time = time.time()
for _ in range(opts.parallelism):
worker = Thread(target=process_queue, args=(task_queue,))
worker.daemon = True
worker.start()
try:
task_queue.join()
except (KeyboardInterrupt, SystemExit):
print_red("Exiting due to interrupt")
sys.exit(-1)
total_duration = time.time() - start_time
LOGGER.info("Tests passed in %i seconds", total_duration)
for key, lines in sorted(SKIPPED_TESTS.items()):
pyspark_python, test_name = key
LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, pyspark_python))
for line in lines:
LOGGER.info(" %s" % line.rstrip())
if __name__ == "__main__":
SKIPPED_TESTS = Manager().dict()
main()
| apache-2.0 |
danny200309/BuildingMachineLearningSystemsWithPython | ch03/rel_post_20news.py | 24 | 3903 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import sklearn.datasets
import scipy as sp
new_post = \
"""Disk drive problems. Hi, I have a problem with my hard disk.
After 1 year it is working only sporadically now.
I tried to format it, but now it doesn't boot any more.
Any ideas? Thanks.
"""
print("""\
Dear reader of the 1st edition of 'Building Machine Learning Systems with Python'!
For the 2nd edition we introduced a couple of changes that will lead to
results that differ from those in the 1st edition.
E.g. we now fully rely on scikit's fetch_20newsgroups() instead of requiring
you to download the data manually from MLCOMP.
If you have any questions, please ask at http://www.twotoreal.com
""")
all_data = sklearn.datasets.fetch_20newsgroups(subset="all")
print("Number of total posts: %i" % len(all_data.filenames))
# Number of total posts: 18846
groups = [
'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware',
'comp.sys.mac.hardware', 'comp.windows.x', 'sci.space']
train_data = sklearn.datasets.fetch_20newsgroups(subset="train",
categories=groups)
print("Number of training posts in tech groups:", len(train_data.filenames))
# Number of training posts in tech groups: 3529
labels = train_data.target
num_clusters = 50 # sp.unique(labels).shape[0]
import nltk.stem
english_stemmer = nltk.stem.SnowballStemmer('english')
from sklearn.feature_extraction.text import TfidfVectorizer
class StemmedTfidfVectorizer(TfidfVectorizer):
def build_analyzer(self):
        analyzer = super(StemmedTfidfVectorizer, self).build_analyzer()
return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc))
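# Optional sanity check (illustrative only): related word forms such as
# "image", "images" and "imaging" should collapse onto a common stem.
print("Example stems: %s"
      % [english_stemmer.stem(w) for w in ("image", "images", "imaging")])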
vectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5,
stop_words='english', decode_error='ignore'
)
vectorized = vectorizer.fit_transform(train_data.data)
num_samples, num_features = vectorized.shape
print("#samples: %d, #features: %d" % (num_samples, num_features))
# samples: 3529, #features: 4712
from sklearn.cluster import KMeans
km = KMeans(n_clusters=num_clusters, n_init=1, verbose=1, random_state=3)
clustered = km.fit(vectorized)
print("km.labels_=%s" % km.labels_)
# km.labels_=[ 6 34 22 ..., 2 21 26]
print("km.labels_.shape=%s" % km.labels_.shape)
# km.labels_.shape=3529
from sklearn import metrics
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
# Homogeneity: 0.400
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
# Completeness: 0.206
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
# V-measure: 0.272
print("Adjusted Rand Index: %0.3f" %
metrics.adjusted_rand_score(labels, km.labels_))
# Adjusted Rand Index: 0.064
print("Adjusted Mutual Information: %0.3f" %
metrics.adjusted_mutual_info_score(labels, km.labels_))
# Adjusted Mutual Information: 0.197
print(("Silhouette Coefficient: %0.3f" %
metrics.silhouette_score(vectorized, labels, sample_size=1000)))
# Silhouette Coefficient: 0.006
new_post_vec = vectorizer.transform([new_post])
new_post_label = km.predict(new_post_vec)[0]
similar_indices = (km.labels_ == new_post_label).nonzero()[0]
similar = []
for i in similar_indices:
dist = sp.linalg.norm((new_post_vec - vectorized[i]).toarray())
similar.append((dist, train_data.data[i]))
similar = sorted(similar)
print("Count similar: %i" % len(similar))
show_at_1 = similar[0]
show_at_2 = similar[int(len(similar) / 10)]
show_at_3 = similar[int(len(similar) / 2)]
print("=== #1 ===")
print(show_at_1)
print()
print("=== #2 ===")
print(show_at_2)
print()
print("=== #3 ===")
print(show_at_3)
| mit |