repo_name
stringlengths 6
112
| path
stringlengths 4
204
| copies
stringlengths 1
3
| size
stringlengths 4
6
| content
stringlengths 714
810k
| license
stringclasses 15
values |
---|---|---|---|---|---|
manipopopo/tensorflow
|
tensorflow/tools/dist_test/python/census_widendeep.py
|
48
|
11896
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distributed training and evaluation of a wide and deep model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import sys
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.estimators import run_config
# Constants: Data download URLs
TRAIN_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data"
TEST_DATA_URL = "http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test"
# Define features for the model
def census_model_config():
    """Configuration for the census Wide & Deep model.

    Builds the tf.contrib.layers feature columns for the UCI "adult"
    (census income) data set: sparse/crossed columns for the linear
    ("wide") part and embedding/real-valued columns for the DNN ("deep")
    part.

    Returns:
      A 6-tuple of:
        columns: Column names to retrieve from the data source.
        label_column: Name of the label column.
        wide_columns: List of wide columns (sparse, bucketized and crossed).
        deep_columns: List of deep columns (embeddings and continuous).
        categorical_columns: Names of the categorical columns.
        continuous_columns: Names of the continuous columns.
    """
    # 1. Categorical base columns.
    # Columns with a small, known vocabulary use explicit keys; the rest are
    # hashed into fixed-size buckets.
    gender = tf.contrib.layers.sparse_column_with_keys(
        column_name="gender", keys=["female", "male"])
    race = tf.contrib.layers.sparse_column_with_keys(
        column_name="race",
        keys=["Amer-Indian-Eskimo",
              "Asian-Pac-Islander",
              "Black",
              "Other",
              "White"])
    education = tf.contrib.layers.sparse_column_with_hash_bucket(
        "education", hash_bucket_size=1000)
    marital_status = tf.contrib.layers.sparse_column_with_hash_bucket(
        "marital_status", hash_bucket_size=100)
    relationship = tf.contrib.layers.sparse_column_with_hash_bucket(
        "relationship", hash_bucket_size=100)
    workclass = tf.contrib.layers.sparse_column_with_hash_bucket(
        "workclass", hash_bucket_size=100)
    occupation = tf.contrib.layers.sparse_column_with_hash_bucket(
        "occupation", hash_bucket_size=1000)
    native_country = tf.contrib.layers.sparse_column_with_hash_bucket(
        "native_country", hash_bucket_size=1000)

    # 2. Continuous base columns.
    age = tf.contrib.layers.real_valued_column("age")
    # Bucketize age so the linear model can learn per-age-range weights.
    age_buckets = tf.contrib.layers.bucketized_column(
        age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
    education_num = tf.contrib.layers.real_valued_column("education_num")
    capital_gain = tf.contrib.layers.real_valued_column("capital_gain")
    capital_loss = tf.contrib.layers.real_valued_column("capital_loss")
    hours_per_week = tf.contrib.layers.real_valued_column("hours_per_week")

    # Wide part: raw sparse columns plus crossed-feature interactions.
    wide_columns = [
        gender, native_country, education, occupation, workclass,
        marital_status, relationship, age_buckets,
        tf.contrib.layers.crossed_column([education, occupation],
                                         hash_bucket_size=int(1e4)),
        tf.contrib.layers.crossed_column([native_country, occupation],
                                         hash_bucket_size=int(1e4)),
        tf.contrib.layers.crossed_column([age_buckets, race, occupation],
                                         hash_bucket_size=int(1e6))]

    # Deep part: dense 8-dimensional embeddings of the sparse columns plus
    # the continuous columns as-is.
    deep_columns = [
        tf.contrib.layers.embedding_column(workclass, dimension=8),
        tf.contrib.layers.embedding_column(education, dimension=8),
        tf.contrib.layers.embedding_column(marital_status, dimension=8),
        tf.contrib.layers.embedding_column(gender, dimension=8),
        tf.contrib.layers.embedding_column(relationship, dimension=8),
        tf.contrib.layers.embedding_column(race, dimension=8),
        tf.contrib.layers.embedding_column(native_country, dimension=8),
        tf.contrib.layers.embedding_column(occupation, dimension=8),
        age, education_num, capital_gain, capital_loss, hours_per_week]

    # Define the column names for the data sets.
    columns = ["age", "workclass", "fnlwgt", "education", "education_num",
               "marital_status", "occupation", "relationship", "race", "gender",
               "capital_gain", "capital_loss", "hours_per_week",
               "native_country", "income_bracket"]
    label_column = "label"
    categorical_columns = ["workclass", "education", "marital_status",
                           "occupation", "relationship", "race", "gender",
                           "native_country"]
    continuous_columns = ["age", "education_num", "capital_gain",
                          "capital_loss", "hours_per_week"]

    return (columns, label_column, wide_columns, deep_columns,
            categorical_columns, continuous_columns)
class CensusDataSource(object):
    """Source of census data.

    Loads the UCI "adult" training/test CSV files from `data_dir` (downloading
    them first if absent), parses them into pandas DataFrames, derives an
    integer label column, and exposes estimator input functions.
    """

    def __init__(self, data_dir, train_data_url, test_data_url,
                 columns, label_column,
                 categorical_columns, continuous_columns):
        """Constructor of CensusDataSource.

        Args:
          data_dir: Directory to save/load the data files.
          train_data_url: URL from which the training data can be downloaded.
          test_data_url: URL from which the test data can be downloaded.
          columns: Columns to retrieve from the data files (a list of strings).
          label_column: Name of the label column.
          categorical_columns: Names of the categorical columns (a list of
            strings).
          continuous_columns: Names of the continuous columns (a list of
            strings).
        """
        # Retrieve data from disk (if available) or download from the web.
        train_file_path = os.path.join(data_dir, "adult.data")
        if os.path.isfile(train_file_path):
            print("Loading training data from file: %s" % train_file_path)
        else:
            # BUG FIX: the original never opened the training file after
            # downloading it (leaving `train_file` undefined on this path),
            # and it opened the test file *before* downloading it.  Download
            # first, then open, in both branches.
            # NOTE: six.moves exposes urlretrieve as urllib.request.urlretrieve;
            # plain `urllib.urlretrieve` does not exist there.
            urllib.request.urlretrieve(train_data_url, train_file_path)
        train_file = open(train_file_path)

        test_file_path = os.path.join(data_dir, "adult.test")
        if os.path.isfile(test_file_path):
            print("Loading test data from file: %s" % test_file_path)
        else:
            urllib.request.urlretrieve(test_data_url, test_file_path)
        test_file = open(test_file_path)

        # Read the training and testing data sets into Pandas DataFrame.
        import pandas  # pylint: disable=g-import-not-at-top
        self._df_train = pandas.read_csv(train_file, names=columns,
                                         skipinitialspace=True)
        # The test file carries an extra header-like first row; skip it.
        self._df_test = pandas.read_csv(test_file, names=columns,
                                        skipinitialspace=True, skiprows=1)

        # Remove the NaN values in the last rows of the tables.
        self._df_train = self._df_train[:-1]
        self._df_test = self._df_test[:-1]

        # Apply the threshold to get the binary labels (1 iff income > 50K).
        income_thresh = lambda x: ">50K" in x
        self._df_train[label_column] = (
            self._df_train["income_bracket"].apply(income_thresh)).astype(int)
        self._df_test[label_column] = (
            self._df_test["income_bracket"].apply(income_thresh)).astype(int)

        self.label_column = label_column
        self.categorical_columns = categorical_columns
        self.continuous_columns = continuous_columns

    def input_train_fn(self):
        """Input function over the training DataFrame."""
        return self._input_fn(self._df_train)

    def input_test_fn(self):
        """Input function over the test DataFrame."""
        return self._input_fn(self._df_test)

    # TODO(cais): Turn into minibatch feeder
    def _input_fn(self, df):
        """Input data function.

        Creates a dictionary mapping from each continuous feature column name
        (k) to the values of that column stored in a constant Tensor.

        Args:
          df: data feed

        Returns:
          feature columns and labels
        """
        continuous_cols = {k: tf.constant(df[k].values)
                           for k in self.continuous_columns}

        # Creates a dictionary mapping from each categorical feature column
        # name (k) to the values of that column stored in a tf.SparseTensor.
        categorical_cols = {
            k: tf.SparseTensor(
                indices=[[i, 0] for i in range(df[k].size)],
                values=df[k].values,
                dense_shape=[df[k].size, 1])
            for k in self.categorical_columns}

        # Merges the two dictionaries into one.
        # BUG FIX: `dict(a.items() + b.items())` was Python-2-only; dict views
        # do not support `+` on Python 3.
        feature_cols = dict(continuous_cols)
        feature_cols.update(categorical_cols)

        # Converts the label column into a constant Tensor.
        label = tf.constant(df[self.label_column].values)

        # Returns the feature columns and the label.
        return feature_cols, label
def _create_experiment_fn(output_dir):  # pylint: disable=unused-argument
    """Experiment creation function."""
    model_config = census_model_config()
    (columns, label_column, wide_columns, deep_columns,
     categorical_columns, continuous_columns) = model_config

    data_source = CensusDataSource(
        FLAGS.data_dir, TRAIN_DATA_URL, TEST_DATA_URL,
        columns, label_column, categorical_columns, continuous_columns)

    # Fake a TF_CONFIG environment so the runner sees the parameter-server
    # cluster and this worker's index.
    tf_config = {
        "cluster": {
            tf.contrib.learn.TaskType.PS:
                ["fake_ps"] * FLAGS.num_parameter_servers
        },
        "task": {
            "index": FLAGS.worker_index
        }
    }
    os.environ["TF_CONFIG"] = json.dumps(tf_config)

    config = run_config.RunConfig(master=FLAGS.master_grpc_url)

    estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
        model_dir=FLAGS.model_dir,
        linear_feature_columns=wide_columns,
        dnn_feature_columns=deep_columns,
        dnn_hidden_units=[5],
        config=config)

    return tf.contrib.learn.Experiment(
        estimator=estimator,
        train_input_fn=data_source.input_train_fn,
        eval_input_fn=data_source.input_test_fn,
        train_steps=FLAGS.train_steps,
        eval_steps=FLAGS.eval_steps)
def main(unused_argv):
    """Entry point: report this worker's index, then hand off to the runner."""
    print("Worker index: %d" % FLAGS.worker_index)
    run_kwargs = {
        "experiment_fn": _create_experiment_fn,
        "output_dir": FLAGS.output_dir,
        "schedule": FLAGS.schedule,
    }
    learn_runner.run(**run_kwargs)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.register("type", "bool", lambda v: v.lower() == "true")

    # One (flag, type, default, help) tuple per command-line option.
    flag_specs = [
        ("--data_dir", str, "/tmp/census-data",
         "Directory for storing the census data"),
        ("--model_dir", str, "/tmp/census_wide_and_deep_model",
         "Directory for storing the model"),
        ("--output_dir", str, "",
         "Base output directory."),
        ("--schedule", str, "local_run",
         "Schedule to run for this experiment."),
        ("--master_grpc_url", str, "",
         "URL to master GRPC tensorflow server, e.g.,grpc://127.0.0.1:2222"),
        ("--num_parameter_servers", int, 0,
         "Number of parameter servers"),
        ("--worker_index", int, 0,
         "Worker index (>=0)"),
        ("--train_steps", int, 1000,
         "Number of training steps"),
        ("--eval_steps", int, 1,
         "Number of evaluation steps"),
    ]
    for flag_name, flag_type, flag_default, flag_help in flag_specs:
        parser.add_argument(flag_name, type=flag_type, default=flag_default,
                            help=flag_help)

    global FLAGS  # pylint:disable=global-at-module-level
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
warmspringwinds/scikit-image
|
doc/ext/notebook.py
|
44
|
3042
|
__all__ = ['python_to_notebook', 'Notebook']
import json
import copy
import warnings
# Skeleton notebook in JSON format
skeleton_nb = """{
"metadata": {
"name":""
},
"nbformat": 3,
"nbformat_minor": 0,
"worksheets": [
{
"cells": [
{
"cell_type": "code",
"collapsed": false,
"input": [
"%matplotlib inline"
],
"language": "python",
"metadata": {},
"outputs": []
}
],
"metadata": {}
}
]
}"""
class Notebook(object):
    """
    Builder for an IPython (nbformat 3) notebook, assembled cell by cell.
    """

    def __init__(self):
        # cell type code
        self.cell_code = {
            'cell_type': 'code',
            'collapsed': False,
            'input': [
                '# Code Goes Here'
            ],
            'language': 'python',
            'metadata': {},
            'outputs': []
        }

        # cell type markdown
        self.cell_md = {
            'cell_type': 'markdown',
            'metadata': {},
            'source': [
                'Markdown Goes Here'
            ]
        }

        # Start from the module-level skeleton and keep lookup tables that
        # map a user-facing type name to the prototype cell and its content key.
        self.template = json.loads(skeleton_nb)
        self.cell_type = {'input': self.cell_code, 'source': self.cell_md}
        self.valuetype_to_celltype = {'code': 'input', 'markdown': 'source'}

    def add_cell(self, value, cell_type='code'):
        """Append one cell containing `value` to the notebook.

        Parameters
        ----------
        value : str
            Cell content.
        cell_type : {'code', 'markdown'}
            Type of content (default is 'code').
        """
        if cell_type not in ('markdown', 'code'):
            warnings.warn('Ignoring unsupported cell type (%s)' % cell_type)
            return
        content_key = self.valuetype_to_celltype[cell_type]
        new_cell = copy.deepcopy(self.cell_type[content_key])
        new_cell[content_key] = value
        self.template['worksheets'][0]['cells'].append(new_cell)

    def json(self):
        """Return a JSON representation of the notebook.

        Returns
        -------
        str
            JSON notebook, indented two spaces.
        """
        return json.dumps(self.template, indent=2)
def test_notebook_basic():
    # A freshly created notebook must serialize back to exactly the skeleton.
    expected = json.loads(skeleton_nb)
    assert json.loads(Notebook().json()) == expected
def test_notebook_add():
    # Adding one markdown and one code cell appends them after the
    # skeleton's single initial cell (indices 1 and 2).
    nb = Notebook()
    md_text = 'hello world'
    code_text = 'f = lambda x: x * x'
    nb.add_cell(md_text, cell_type='markdown')
    nb.add_cell(code_text, cell_type='code')

    doc = json.loads(nb.json())
    cells = doc['worksheets'][0]['cells']
    values = [c['input'] if c['cell_type'] == 'code' else c['source']
              for c in cells]

    assert values[1] == md_text
    assert values[2] == code_text
    assert cells[1]['cell_type'] == 'markdown'
    assert cells[2]['cell_type'] == 'code'
if __name__ == "__main__":
    # Run this module's test_* functions through numpy's test runner.
    import numpy.testing as npt
    npt.run_module_suite()
|
bsd-3-clause
|
vmayoral/basic_reinforcement_learning
|
tutorial6/examples/Catch/test.py
|
1
|
1236
|
import json
import matplotlib.pyplot as plt
import numpy as np
from keras.models import model_from_json
from qlearn import Catch
if __name__ == "__main__":
    # Make sure this grid size matches the value used for training
    grid_size = 10

    # Rebuild the trained model: architecture from JSON, weights from HDF5.
    with open("model.json", "r") as jfile:
        model = model_from_json(json.load(jfile))
    model.load_weights("model.h5")
    model.compile("sgd", "mse")

    # Define environment, game
    env = Catch(grid_size)
    c = 0  # frame counter; numbers the saved PNG snapshots
    for e in range(10):
        # NOTE(review): `loss` is initialized but never updated or read below.
        loss = 0.
        env.reset()
        game_over = False
        # get initial input
        input_t = env.observe()

        # Save a snapshot of the initial board state.
        plt.imshow(input_t.reshape((grid_size,)*2),
                   interpolation='none', cmap='gray')
        plt.savefig("%03d.png" % c)
        c += 1
        while not game_over:
            input_tm1 = input_t

            # get next action (greedy: argmax over predicted Q-values)
            q = model.predict(input_tm1)
            action = np.argmax(q[0])

            # apply action, get rewards and new state
            input_t, reward, game_over = env.act(action)

            # Save a snapshot after each step.
            plt.imshow(input_t.reshape((grid_size,)*2),
                       interpolation='none', cmap='gray')
            plt.savefig("%03d.png" % c)
            c += 1
|
gpl-3.0
|
shahankhatch/scikit-learn
|
examples/cluster/plot_birch_vs_minibatchkmeans.py
|
333
|
3694
|
"""
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
|
bsd-3-clause
|
andyfaff/scipy
|
scipy/stats/stats.py
|
3
|
310601
|
# Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
"""
A collection of basic statistical functions for Python.
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
import warnings
import math
from math import gcd
from collections import namedtuple
from itertools import permutations
import numpy as np
from numpy import array, asarray, ma
from scipy.spatial.distance import cdist
from scipy.ndimage import measurements
from scipy._lib._util import (check_random_state, MapWrapper,
rng_integers, float_factorial)
import scipy.special as special
from scipy import linalg
from . import distributions
from . import mstats_basic
from ._stats_mstats_common import (_find_repeats, linregress, theilslopes,
siegelslopes)
from ._stats import (_kendall_dis, _toint64, _weightedrankedtau,
_local_correlations)
from dataclasses import make_dataclass
# Functions/classes in other files should be added in `__init__.py`, not here
# (this list defines the star-import surface of this module only).
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
           'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
           'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
           'normaltest', 'jarque_bera', 'itemfreq',
           'scoreatpercentile', 'percentileofscore',
           'cumfreq', 'relfreq', 'obrientransform',
           'sem', 'zmap', 'zscore', 'iqr', 'gstd', 'median_absolute_deviation',
           'median_abs_deviation',
           'sigmaclip', 'trimboth', 'trim1', 'trim_mean',
           'f_oneway', 'F_onewayConstantInputWarning',
           'F_onewayBadInputSizesWarning',
           'PearsonRConstantInputWarning', 'PearsonRNearConstantInputWarning',
           'pearsonr', 'fisher_exact',
           'SpearmanRConstantInputWarning', 'spearmanr', 'pointbiserialr',
           'kendalltau', 'weightedtau', 'multiscale_graphcorr',
           'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp',
           'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel',
           'kstest', 'ks_1samp', 'ks_2samp',
           'chisquare', 'power_divergence',
           'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
           'rankdata',
           'combine_pvalues', 'wasserstein_distance', 'energy_distance',
           'brunnermunzel', 'alexandergovern']
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# This can happen when attempting to sum things which are not
# numbers (e.g. as in the function `mode`). Try an alternative method:
try:
contains_nan = np.nan in set(a.ravel())
except TypeError:
# Don't know what to do. Fall back to omitting nan values and
# issue a warning.
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly "
"checked for nan values. nan values "
"will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return contains_nan, nan_policy
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _shape_with_dropped_axis(a, axis):
"""
Given an array `a` and an integer `axis`, return the shape
of `a` with the `axis` dimension removed.
Examples
--------
>>> a = np.zeros((3, 5, 2))
>>> _shape_with_dropped_axis(a, 1)
(3, 2)
"""
shp = list(a.shape)
try:
del shp[axis]
except IndexError:
raise np.AxisError(axis, a.ndim) from None
return tuple(shp)
def _broadcast_shapes(shape1, shape2):
"""
Given two shapes (i.e. tuples of integers), return the shape
that would result from broadcasting two arrays with the given
shapes.
Examples
--------
>>> _broadcast_shapes((2, 1), (4, 1, 3))
(4, 2, 3)
"""
d = len(shape1) - len(shape2)
if d <= 0:
shp1 = (1,)*(-d) + shape1
shp2 = shape2
else:
shp1 = shape1
shp2 = (1,)*d + shape2
shape = []
for n1, n2 in zip(shp1, shp2):
if n1 == 1:
n = n2
elif n2 == 1 or n1 == n2:
n = n1
else:
raise ValueError(f'shapes {shape1} and {shape2} could not be '
'broadcast together')
shape.append(n)
return tuple(shape)
def _broadcast_shapes_with_dropped_axis(a, b, axis):
    """
    Given two arrays `a` and `b` and an integer `axis`, find the
    shape of the broadcast result after dropping `axis` from the
    shapes of `a` and `b`.

    Examples
    --------
    >>> a = np.zeros((5, 2, 1))
    >>> b = np.zeros((1, 9, 3))
    >>> _broadcast_shapes_with_dropped_axis(a, b, 1)
    (5, 3)
    """
    shape_a = _shape_with_dropped_axis(a, axis)
    shape_b = _shape_with_dropped_axis(b, axis)
    try:
        return _broadcast_shapes(shape_a, shape_b)
    except ValueError:
        # Translate into a message that names the non-axis shapes.
        raise ValueError(f'non-axis shapes {shape_a} and {shape_b} could not '
                         f'be broadcast together') from None
def gmean(a, axis=0, dtype=None, weights=None):
    """Compute the geometric mean along the specified axis.

    Return the geometric average of the array elements, i.e. the n-th root
    of ``x1 * x2 * ... * xn``, computed as ``exp(mean(log(a)))``.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int or None, optional
        Axis along which the geometric mean is computed. Default is 0.
        If None, compute over the whole array `a`.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are summed. If dtype is not specified, it defaults to the
        dtype of `a`, unless `a` has an integer dtype with a precision less
        than that of the default platform integer, in which case the default
        platform integer is used.
    weights : array_like, optional
        The weights array can either be 1-D (with length equal to the size of
        `a` along `axis`) or of the same shape as `a`. Default is None, which
        gives each value a weight of 1.0.

    Returns
    -------
    gmean : ndarray
        See `dtype` parameter above.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    hmean : Harmonic mean

    Notes
    -----
    float64 intermediate and return values are used for integer inputs.
    Use masked arrays to ignore any non-finite values in the input or that
    arise in the calculations, because masked arrays automatically mask any
    non-finite values.

    References
    ----------
    .. [1] "Weighted Geometric Mean", *Wikipedia*,
           https://en.wikipedia.org/wiki/Weighted_geometric_mean.

    Examples
    --------
    >>> from scipy.stats import gmean
    >>> gmean([1, 4])
    2.0
    >>> gmean([1, 2, 3, 4, 5, 6, 7])
    3.3800151591412964
    """
    # Take logs up front; the branch only decides how `a` is coerced so the
    # requested dtype (and masked-ness) is respected.
    if isinstance(a, np.ndarray) and not dtype:
        log_a = np.log(a)
    elif isinstance(a, np.ma.MaskedArray):
        # Must change the default dtype while preserving the masked type.
        log_a = np.log(np.ma.asarray(a, dtype=dtype))
    elif isinstance(a, np.ndarray):
        log_a = np.log(np.asarray(a, dtype=dtype))
    else:
        log_a = np.log(np.array(a, dtype=dtype))

    if weights is not None:
        weights = np.asanyarray(weights, dtype=dtype)

    return np.exp(np.average(log_a, axis=axis, weights=weights))
def hmean(a, axis=0, dtype=None):
    """Calculate the harmonic mean along the specified axis.

    That is: ``n / (1/x1 + 1/x2 + ... + 1/xn)``.

    Parameters
    ----------
    a : array_like
        Input array, masked array or object that can be converted to an array.
    axis : int or None, optional
        Axis along which the harmonic mean is computed. Default is 0.
        If None, compute over the whole array `a`.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are summed. If `dtype` is not specified, it defaults to the
        dtype of `a`, unless `a` has an integer `dtype` with a precision less
        than that of the default platform integer, in which case the default
        platform integer is used.

    Returns
    -------
    hmean : ndarray
        See `dtype` parameter above.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    gmean : Geometric mean

    Notes
    -----
    float64 intermediate and return values are used for integer inputs.
    Use masked arrays to ignore any non-finite values in the input or that
    arise in the calculations.

    Examples
    --------
    >>> from scipy.stats import hmean
    >>> hmean([1, 4])
    1.6000000000000001
    >>> hmean([1, 2, 3, 4, 5, 6, 7])
    2.6997245179063363
    """
    if not isinstance(a, np.ndarray):
        a = np.array(a, dtype=dtype)

    # Harmonic mean is only defined for non-negative input; reject early.
    if not np.all(a >= 0):
        raise ValueError("Harmonic mean only defined if all elements greater "
                         "than or equal to zero")

    if isinstance(a, np.ma.MaskedArray):
        # Count only unmasked entries along the reduction axis.
        size = a.count(axis)
    elif axis is None:
        a = a.ravel()
        size = a.shape[0]
    else:
        size = a.shape[axis]

    # Zeros produce inf reciprocals; suppress the divide warning like the
    # original implementation.
    with np.errstate(divide='ignore'):
        return size / np.sum(1.0 / a, axis=axis, dtype=dtype)
# Result type for `mode`: modal values plus their occurrence counts.
ModeResult = namedtuple('ModeResult', ('mode', 'count'))


def mode(a, axis=0, nan_policy='propagate'):
    """Return an array of the modal (most common) value in the passed array.

    If there is more than one such value, only the smallest is returned.
    The bin-count for the modal bins is also returned.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    mode : ndarray
        Array of modal values.
    count : ndarray
        Array of counts for each mode.

    Examples
    --------
    >>> a = np.array([[6, 8, 3, 0],
    ...               [3, 2, 1, 7],
    ...               [8, 1, 8, 4],
    ...               [5, 3, 0, 5],
    ...               [4, 7, 5, 9]])
    >>> from scipy import stats
    >>> stats.mode(a)
    ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]]))

    To get mode of whole array, specify ``axis=None``:

    >>> stats.mode(a, axis=None)
    ModeResult(mode=array([3]), count=array([3]))
    """
    a, axis = _chk_asarray(a, axis)
    if a.size == 0:
        # Empty input: empty mode and count arrays.
        return ModeResult(np.array([]), np.array([]))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)

    if contains_nan and nan_policy == 'omit':
        # Delegate to the masked-array implementation, which skips the
        # masked (invalid) entries.
        a = ma.masked_invalid(a)
        return mstats_basic.mode(a, axis)

    if a.dtype == object and np.nan in set(a.ravel()):
        # Fall back to a slower method since np.unique does not work with NaN
        # in object arrays: tally each distinct value with a full comparison
        # pass and keep a running most-frequent-so-far per position.
        scores = set(np.ravel(a))  # get ALL unique values
        testshape = list(a.shape)
        testshape[axis] = 1
        oldmostfreq = np.zeros(testshape, dtype=a.dtype)
        oldcounts = np.zeros(testshape, dtype=int)
        for score in scores:
            template = (a == score)
            counts = np.sum(template, axis, keepdims=True)
            # Strictly-greater comparison keeps the earlier candidate on ties.
            mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
            oldcounts = np.maximum(counts, oldcounts)
            oldmostfreq = mostfrequent
        return ModeResult(mostfrequent, oldcounts)

    def _mode1D(a):
        # np.unique returns values sorted ascending, so argmax on the counts
        # picks the smallest value among equally common ones.
        vals, cnts = np.unique(a, return_counts=True)
        return vals[cnts.argmax()], cnts.max()

    # np.apply_along_axis will convert the _mode1D tuples to a numpy array,
    # casting types in the process.
    # This recreates the results without that issue
    # View of a, rotated so the requested axis is last
    in_dims = list(range(a.ndim))
    a_view = np.transpose(a, in_dims[:axis] + in_dims[axis+1:] + [axis])

    # Apply _mode1D to every 1-D slice along the (now last) axis.
    inds = np.ndindex(a_view.shape[:-1])
    modes = np.empty(a_view.shape[:-1], dtype=a.dtype)
    counts = np.empty(a_view.shape[:-1], dtype=np.int_)
    for ind in inds:
        modes[ind], counts[ind] = _mode1D(a_view[ind])

    # Restore the reduced axis as length 1, matching apply_along_axis output.
    newshape = list(a.shape)
    newshape[axis] = 1
    return ModeResult(modes.reshape(newshape), counts.reshape(newshape))
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
    """Compute the trimmed mean.

    This function finds the arithmetic mean of given values, ignoring values
    outside the given `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values in the input array less than the lower limit or greater than
        the upper limit will be ignored.  When limits is None (default),
        then all values are used.  Either of the limit values in the tuple
        can also be None representing a half-open interval.
    inclusive : (bool, bool), optional
        A tuple consisting of the (lower flag, upper flag).  These flags
        determine whether values exactly equal to the lower or upper limits
        are included.  The default value is (True, True).
    axis : int or None, optional
        Axis along which to compute the trimmed mean.  Default is None,
        which computes over the whole array `a`.

    Returns
    -------
    tmean : ndarray or float
        Trimmed mean.

    See Also
    --------
    trim_mean : Returns mean after trimming a proportion from both tails.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmean(x)
    9.5

    >>> stats.tmean(x, (3,17))
    10.0

    """
    a = asarray(a)
    if limits is None:
        # No trimming requested: this is a plain mean along `axis`.
        # (Previously the axis argument was ignored here, flattening the
        # input regardless of the caller's request.)
        return np.mean(a, axis)

    # Mask values outside the limits on the *unflattened* array so that
    # `axis` is honored; slices that end up fully masked yield NaN.
    am = _mask_to_limits(a, limits, inclusive)
    mean = np.ma.filled(am.mean(axis=axis), fill_value=np.nan)
    return mean if mean.ndim > 0 else mean.item()
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """Compute the trimmed variance.

    This function computes the sample variance of an array of values,
    while ignoring values which are outside of given `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside this interval are ignored.  When limits is None
        (the default), every value is used.  Either bound may also be None,
        representing a half-open interval.
    inclusive : (bool, bool), optional
        A tuple of (lower flag, upper flag); each flag says whether values
        exactly equal to the corresponding limit are kept.  The default
        value is (True, True).
    axis : int or None, optional
        Axis along which to operate.  Default is 0.  If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom.  Default is 1.

    Returns
    -------
    tvar : float
        Trimmed variance.

    Notes
    -----
    `tvar` computes the unbiased sample variance, i.e. it uses a correction
    factor ``n / (n - 1)``.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tvar(x)
    35.0

    >>> stats.tvar(x, (3,17))
    20.0

    """
    arr = asarray(a).astype(float)
    if limits is None:
        # Nothing to trim: plain sample variance.
        return arr.var(ddof=ddof, axis=axis)

    # Mask out-of-limits entries, then replace them with NaN so that
    # np.nanvar counts only the surviving values along each slice.
    masked = _mask_to_limits(arr, limits, inclusive)
    return np.nanvar(masked.filled(fill_value=np.nan), ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
    """Compute the trimmed minimum.

    This function finds the minimum value of an array `a` along the
    specified axis, but only considering values greater than a specified
    lower limit.

    Parameters
    ----------
    a : array_like
        Array of values.
    lowerlimit : None or float, optional
        Values in the input array less than the given limit will be
        ignored.  When lowerlimit is None, then all values are used.  The
        default value is None.
    axis : int or None, optional
        Axis along which to operate.  Default is 0.  If None, compute over
        the whole array `a`.
    inclusive : {True, False}, optional
        This flag determines whether values exactly equal to the lower
        limit are included.  The default value is True.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    tmin : float, int or ndarray
        Trimmed minimum.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmin(x)
    0

    >>> stats.tmin(x, 13)
    13

    >>> stats.tmin(x, 13, inclusive=False)
    14

    """
    a, axis = _chk_asarray(a, axis)
    # Only a lower bound applies; the upper slot is left open.
    am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
    contains_nan, nan_policy = _contains_nan(am, nan_policy)
    if contains_nan and nan_policy == 'omit':
        am = ma.masked_invalid(am)
    reduced = ma.minimum.reduce(am, axis).data
    # A 0-d result is unwrapped to a plain scalar, matching NumPy
    # reduction conventions.
    return reduced[()] if reduced.ndim == 0 else reduced
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
    """Compute the trimmed maximum.

    This function computes the maximum value of an array along a given
    axis, while ignoring values larger than a specified upper limit.

    Parameters
    ----------
    a : array_like
        Array of values.
    upperlimit : None or float, optional
        Values in the input array greater than the given limit will be
        ignored.  When upperlimit is None, then all values are used.  The
        default value is None.
    axis : int or None, optional
        Axis along which to operate.  Default is 0.  If None, compute over
        the whole array `a`.
    inclusive : {True, False}, optional
        This flag determines whether values exactly equal to the upper
        limit are included.  The default value is True.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    tmax : float, int or ndarray
        Trimmed maximum.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmax(x)
    19

    >>> stats.tmax(x, 13)
    13

    >>> stats.tmax(x, 13, inclusive=False)
    12

    """
    a, axis = _chk_asarray(a, axis)
    # Only an upper bound applies; the lower slot is left open.
    am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
    contains_nan, nan_policy = _contains_nan(am, nan_policy)
    if contains_nan and nan_policy == 'omit':
        am = ma.masked_invalid(am)
    reduced = ma.maximum.reduce(am, axis).data
    # A 0-d result is unwrapped to a plain scalar, matching NumPy
    # reduction conventions.
    return reduced[()] if reduced.ndim == 0 else reduced
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """Compute the trimmed sample standard deviation.

    This function finds the sample standard deviation of given values,
    ignoring values outside the given `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside this interval are ignored.  When limits is None,
        every value is used.  Either bound may also be None, representing a
        half-open interval.  The default value is None.
    inclusive : (bool, bool), optional
        A tuple of (lower flag, upper flag); each flag says whether values
        exactly equal to the corresponding limit are kept.  The default
        value is (True, True).
    axis : int or None, optional
        Axis along which to operate.  Default is 0.  If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom.  Default is 1.

    Returns
    -------
    tstd : float
        Trimmed sample standard deviation.

    Notes
    -----
    `tstd` computes the unbiased sample standard deviation, i.e. it uses a
    correction factor ``n / (n - 1)``.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tstd(x)
    5.9160797830996161

    >>> stats.tstd(x, (3,17))
    4.4721359549995796

    """
    # The trimmed standard deviation is the square root of the trimmed
    # variance; all trimming logic lives in `tvar`.
    trimmed_variance = tvar(a, limits, inclusive, axis, ddof)
    return np.sqrt(trimmed_variance)
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """Compute the trimmed standard error of the mean.

    This function finds the standard error of the mean for given
    values, ignoring values outside the given `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside this interval are ignored.  When limits is None,
        every value is used.  Either bound may also be None, representing a
        half-open interval.  The default value is None.
    inclusive : (bool, bool), optional
        A tuple of (lower flag, upper flag); each flag says whether values
        exactly equal to the corresponding limit are kept.  The default
        value is (True, True).
    axis : int or None, optional
        Axis along which to operate.  Default is 0.  If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom.  Default is 1.

    Returns
    -------
    tsem : float
        Trimmed standard error of the mean.

    Notes
    -----
    `tsem` uses the unbiased sample standard deviation, i.e. it uses a
    correction factor ``n / (n - 1)``.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tsem(x)
    1.3228756555322954

    >>> stats.tsem(x, (3,17))
    1.1547005383792515

    """
    flat = np.asarray(a).ravel()
    if limits is None:
        # Untrimmed case: classic std / sqrt(n).
        return flat.std(ddof=ddof) / np.sqrt(flat.size)

    masked = _mask_to_limits(flat, limits, inclusive)
    trimmed_sd = np.sqrt(np.ma.var(masked, ddof=ddof, axis=axis))
    return trimmed_sd / np.sqrt(masked.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
    r"""Calculate the nth moment about the mean for a sample.

    A moment is a specific quantitative measure of the shape of a set of
    points. It is often used to calculate coefficients of skewness and kurtosis
    due to its close relationship with them.

    Parameters
    ----------
    a : array_like
       Input array.
    moment : int or array_like of ints, optional
       Order of central moment that is returned. Default is 1.
    axis : int or None, optional
       Axis along which the central moment is computed. Default is 0.
       If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    n-th central moment : ndarray or float
       The appropriate moment along the given axis or over all values if axis
       is None. The denominator for the moment calculation is the number of
       observations, no degrees of freedom correction is done.

    See Also
    --------
    kurtosis, skew, describe

    Notes
    -----
    The k-th central moment of a data sample is:

    .. math::

        m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k

    Where n is the number of samples and x-bar is the mean. This function uses
    exponentiation by squares [1]_ for efficiency.

    References
    ----------
    .. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms

    Examples
    --------
    >>> from scipy.stats import moment
    >>> moment([1, 2, 3, 4, 5], moment=1)
    0.0
    >>> moment([1, 2, 3, 4, 5], moment=2)
    2.0
    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # NaN-omitting path: delegate to the masked-array implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.moment(a, moment, axis)
    if a.size == 0:
        moment_shape = list(a.shape)
        del moment_shape[axis]
        # Preserve a floating/complex input dtype; promote everything else
        # (ints, bools) to float64.
        dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
        # empty array, return nan(s) with shape matching `moment`
        out_shape = (moment_shape if np.isscalar(moment)
                    else [len(moment)] + moment_shape)
        if len(out_shape) == 0:
            return dtype(np.nan)
        else:
            return np.full(out_shape, np.nan, dtype=dtype)
    # for array_like moment input, return a value for each.
    if not np.isscalar(moment):
        # Compute the mean once and reuse it for every requested order.
        mean = a.mean(axis, keepdims=True)
        mmnt = [_moment(a, i, axis, mean=mean) for i in moment]
        return np.array(mmnt)
    else:
        return _moment(a, moment, axis)
# Moment with optional pre-computed mean, equal to a.mean(axis, keepdims=True)
def _moment(a, moment, axis, *, mean=None):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0 or moment == 1:
# By definition the zeroth moment about the mean is 1, and the first
# moment is 0.
shape = list(a.shape)
del shape[axis]
dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
if len(shape) == 0:
return dtype(1.0 if moment == 0 else 0.0)
else:
return (np.ones(shape, dtype=dtype) if moment == 0
else np.zeros(shape, dtype=dtype))
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n - 1) / 2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
mean = a.mean(axis, keepdims=True) if mean is None else mean
a_zero_mean = a - mean
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate', ddof=0):
    """Compute the coefficient of variation.

    The coefficient of variation is the standard deviation divided by the
    mean.  This function is equivalent to::

        np.std(x, axis=axis, ddof=ddof) / np.mean(x)

    The default for ``ddof`` is 0, but many definitions of the coefficient
    of variation use the square root of the unbiased sample variance
    for the sample standard deviation, which corresponds to ``ddof=1``.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate the coefficient of variation.
        Default is 0.  If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

    ddof : int, optional
        Delta degrees of freedom.  Default is 0.

    Returns
    -------
    variation : ndarray
        The calculated variation along the requested axis.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.

    Examples
    --------
    >>> from scipy.stats import variation
    >>> variation([1, 2, 3, 4, 5])
    0.47140452079103173

    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # NaN-omitting path: delegate to the masked-array implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.variation(a, axis, ddof)

    std = a.std(axis, ddof=ddof)
    return std / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
    r"""Compute the sample skewness of a data set.

    For normally distributed data, the skewness should be about zero. For
    unimodal continuous distributions, a skewness value greater than zero means
    that there is more weight in the right tail of the distribution. The
    function `skewtest` can be used to determine if the skewness value
    is close enough to zero, statistically speaking.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis : int or None, optional
        Axis along which skewness is calculated. Default is 0.
        If None, compute over the whole array `a`.
    bias : bool, optional
        If False, then the calculations are corrected for statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    skewness : ndarray
        The skewness of values along an axis, returning 0 where all values are
        equal.

    Notes
    -----
    The sample skewness is computed as the Fisher-Pearson coefficient
    of skewness, i.e.

    .. math::

        g_1=\frac{m_3}{m_2^{3/2}}

    where

    .. math::

        m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i

    is the biased sample :math:`i\texttt{th}` central moment, and
    :math:`\bar{x}` is
    the sample mean.  If ``bias`` is False, the calculations are
    corrected for bias and the value computed is the adjusted
    Fisher-Pearson standardized moment coefficient, i.e.

    .. math::

        G_1=\frac{k_3}{k_2^{3/2}}=
            \frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
       Section 2.2.24.1

    Examples
    --------
    >>> from scipy.stats import skew
    >>> skew([1, 2, 3, 4, 5])
    0.0
    >>> skew([2, 8, 0, 4, 1, 9, 9, 0])
    0.2650554122698573

    """
    a, axis = _chk_asarray(a, axis)
    n = a.shape[axis]
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # NaN-omitting path: delegate to the masked-array implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.skew(a, axis, bias)
    # Second and third central moments, sharing one mean computation.
    mean = a.mean(axis, keepdims=True)
    m2 = _moment(a, 2, axis, mean=mean)
    m3 = _moment(a, 3, axis, mean=mean)
    with np.errstate(all='ignore'):
        # Slices whose variance is (numerically) zero get skewness 0
        # instead of a 0/0 division.
        zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
        vals = np.where(zero, 0, m3 / m2**1.5)
    if not bias:
        # Adjusted Fisher-Pearson coefficient G_1 (see Notes); the
        # correction is only applied where it is defined: non-constant
        # slices with n > 2.
        can_correct = ~zero & (n > 2)
        if can_correct.any():
            m2 = np.extract(can_correct, m2)
            m3 = np.extract(can_correct, m3)
            nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5
            np.place(vals, can_correct, nval)
    if vals.ndim == 0:
        return vals.item()
    return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
    """Compute the kurtosis (Fisher or Pearson) of a dataset.

    Kurtosis is the fourth central moment divided by the square of the
    variance. If Fisher's definition is used, then 3.0 is subtracted from
    the result to give 0.0 for a normal distribution.

    If bias is False then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators

    Use `kurtosistest` to see if result is close enough to normal.

    Parameters
    ----------
    a : array
        Data for which the kurtosis is calculated.
    axis : int or None, optional
        Axis along which the kurtosis is calculated. Default is 0.
        If None, compute over the whole array `a`.
    fisher : bool, optional
        If True, Fisher's definition is used (normal ==> 0.0). If False,
        Pearson's definition is used (normal ==> 3.0).
    bias : bool, optional
        If False, then the calculations are corrected for statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    kurtosis : array
        The kurtosis of values along an axis. If all values are equal,
        return -3 for Fisher's definition and 0 for Pearson's definition.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.

    Examples
    --------
    In Fisher's definiton, the kurtosis of the normal distribution is zero.
    In the following example, the kurtosis is close to zero, because it was
    calculated from the dataset, not from the continuous distribution.

    >>> from scipy.stats import norm, kurtosis
    >>> data = norm.rvs(size=1000, random_state=3)
    >>> kurtosis(data)
    -0.06928694200380558

    The distribution with a higher kurtosis has a heavier tail.
    The zero valued kurtosis of the normal distribution in Fisher's definition
    can serve as a reference point.

    >>> import matplotlib.pyplot as plt
    >>> import scipy.stats as stats
    >>> from scipy.stats import kurtosis
    >>> x = np.linspace(-5, 5, 100)
    >>> ax = plt.subplot()
    >>> distnames = ['laplace', 'norm', 'uniform']
    >>> for distname in distnames:
    ...     if distname == 'uniform':
    ...         dist = getattr(stats, distname)(loc=-2, scale=4)
    ...     else:
    ...         dist = getattr(stats, distname)
    ...     data = dist.rvs(size=1000)
    ...     kur = kurtosis(data, fisher=True)
    ...     y = dist.pdf(x)
    ...     ax.plot(x, y, label="{}, {}".format(distname, round(kur, 3)))
    ...     ax.legend()

    The Laplace distribution has a heavier tail than the normal distribution.
    The uniform distribution (which has negative kurtosis) has the thinnest
    tail.

    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # NaN-omitting path: delegate to the masked-array implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.kurtosis(a, axis, fisher, bias)
    n = a.shape[axis]
    # Second and fourth central moments, sharing one mean computation.
    mean = a.mean(axis, keepdims=True)
    m2 = _moment(a, 2, axis, mean=mean)
    m4 = _moment(a, 4, axis, mean=mean)
    with np.errstate(all='ignore'):
        # Slices whose variance is (numerically) zero get Pearson
        # kurtosis 0 instead of a 0/0 division.
        zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
        vals = np.where(zero, 0, m4 / m2**2.0)
    if not bias:
        # k-statistics bias correction; only applied where it is defined:
        # non-constant slices with n > 3.
        can_correct = ~zero & (n > 3)
        if can_correct.any():
            m2 = np.extract(can_correct, m2)
            m4 = np.extract(can_correct, m4)
            nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
            np.place(vals, can_correct, nval + 3.0)
    if vals.ndim == 0:
        vals = vals.item()  # array scalar
    # Fisher's definition subtracts 3 so the normal distribution maps to 0.
    return vals - 3 if fisher else vals
# Result type returned by `describe`; fields mirror the statistics it reports.
DescribeResult = namedtuple('DescribeResult',
                            ('nobs', 'minmax', 'mean', 'variance', 'skewness',
                             'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
    """Compute several descriptive statistics of the passed array.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int or None, optional
        Axis along which statistics are calculated.  Default is 0.
        If None, compute over the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom (only for variance).  Default is 1.
    bias : bool, optional
        If False, then the skewness and kurtosis calculations are corrected
        for statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    nobs : int or ndarray of ints
        Number of observations (length of data along `axis`).
        When 'omit' is chosen as nan_policy, the length along each axis
        slice is counted separately.
    minmax: tuple of ndarrays or floats
        Minimum and maximum value of `a` along the given axis.
    mean : ndarray or float
        Arithmetic mean of `a` along the given axis.
    variance : ndarray or float
        Unbiased variance of `a` along the given axis; denominator is number
        of observations minus one.
    skewness : ndarray or float
        Skewness of `a` along the given axis, based on moment calculations
        with denominator equal to the number of observations, i.e. no degrees
        of freedom correction.
    kurtosis : ndarray or float
        Kurtosis (Fisher) of `a` along the given axis.  The kurtosis is
        normalized so that it is zero for the normal distribution.  No
        degrees of freedom are used.

    See Also
    --------
    skew, kurtosis

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(10)
    >>> stats.describe(a)
    DescribeResult(nobs=10, minmax=(0, 9), mean=4.5,
                   variance=9.166666666666666, skewness=0.0,
                   kurtosis=-1.2242424242424244)
    >>> b = [[1, 2], [3, 4]]
    >>> stats.describe(b)
    DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
                   mean=array([2., 3.]), variance=array([2., 2.]),
                   skewness=array([0., 0.]), kurtosis=array([-2., -2.]))

    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # NaN-omitting path: delegate to the masked-array implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.describe(a, axis, ddof, bias)

    if a.size == 0:
        raise ValueError("The input must not be empty.")

    nobs = a.shape[axis]
    minmax = (np.min(a, axis=axis), np.max(a, axis=axis))
    mean = np.mean(a, axis=axis)
    variance = np.var(a, axis=axis, ddof=ddof)
    skewness = skew(a, axis, bias=bias)
    kurt = kurtosis(a, axis, bias=bias)
    return DescribeResult(nobs, minmax, mean, variance, skewness, kurt)
#####################################
# NORMALITY TESTS #
#####################################
def _normtest_finish(z, alternative):
"""Common code between all the normality-test functions."""
if alternative == 'less':
prob = distributions.norm.cdf(z)
elif alternative == 'greater':
prob = distributions.norm.sf(z)
elif alternative == 'two-sided':
prob = 2 * distributions.norm.sf(np.abs(z))
else:
raise ValueError("alternative must be "
"'less', 'greater' or 'two-sided'")
if z.ndim == 0:
z = z[()]
return z, prob
# (statistic, pvalue) result type returned by `skewtest`.
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
    """Test whether the skew is different from the normal distribution.

    This function tests the null hypothesis that the skewness of
    the population that the sample was drawn from is the same
    as that of a corresponding normal distribution.

    Parameters
    ----------
    a : array
        The data to be tested.
    axis : int or None, optional
       Axis along which statistics are calculated. Default is 0.
       If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        The following options are available:

        * 'two-sided': the skewness of the distribution underlying the sample
          is different from that of the normal distribution (i.e. 0)
        * 'less': the skewness of the distribution underlying the sample
          is less than that of the normal distribution
        * 'greater': the skewness of the distribution underlying the sample
          is greater than that of the normal distribution

        .. versionadded:: 1.7.0

    Returns
    -------
    statistic : float
        The computed z-score for this test.
    pvalue : float
        The p-value for the hypothesis test.

    Notes
    -----
    The sample size must be at least 8.

    References
    ----------
    .. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
            "A suggestion for using powerful and informative tests of
            normality", American Statistician 44, pp. 316-321, 1990.

    Examples
    --------
    >>> from scipy.stats import skewtest
    >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8])
    SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897)
    >>> skewtest([2, 8, 0, 4, 1, 9, 9, 0])
    SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459)
    >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000])
    SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133)
    >>> skewtest([100, 100, 100, 100, 100, 100, 100, 101])
    SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634)
    >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='less')
    SkewtestResult(statistic=1.0108048609177787, pvalue=0.8439450819289052)
    >>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='greater')
    SkewtestResult(statistic=1.0108048609177787, pvalue=0.15605491807109484)

    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        if alternative != 'two-sided':
            raise ValueError("nan-containing/masked inputs with "
                             "nan_policy='omit' are currently not "
                             "supported by one-sided alternatives.")
        # NaN-omitting path: delegate to the masked-array implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.skewtest(a, axis)
    if axis is None:
        a = np.ravel(a)
        axis = 0
    b2 = skew(a, axis)
    n = a.shape[axis]
    if n < 8:
        raise ValueError(
            "skewtest is not valid with less than 8 samples; %i samples"
            " were given." % int(n))
    # Transform the sample skewness b2 into an approximately standard
    # normal statistic Z, following D'Agostino et al. [1]_.
    y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
    beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
             ((n-2.0) * (n+5) * (n+7) * (n+9)))
    W2 = -1 + math.sqrt(2 * (beta2 - 1))
    delta = 1 / math.sqrt(0.5 * math.log(W2))
    alpha = math.sqrt(2.0 / (W2 - 1))
    # NOTE(review): y == 0 is mapped to 1 before the log transform below;
    # kept as-is to preserve the established behavior.
    y = np.where(y == 0, 1, y)
    Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
    return SkewtestResult(*_normtest_finish(Z, alternative))
# (statistic, pvalue) result type returned by `kurtosistest`.
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
    """Test whether a dataset has normal kurtosis.

    This function tests the null hypothesis that the kurtosis
    of the population from which the sample was drawn is that
    of the normal distribution.

    Parameters
    ----------
    a : array
        Array of the sample data.
    axis : int or None, optional
       Axis along which to compute test. Default is 0. If None,
       compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
        The following options are available (default is 'two-sided'):

        * 'two-sided': the kurtosis of the distribution underlying the sample
          is different from that of the normal distribution
        * 'less': the kurtosis of the distribution underlying the sample
          is less than that of the normal distribution
        * 'greater': the kurtosis of the distribution underlying the sample
          is greater than that of the normal distribution

        .. versionadded:: 1.7.0

    Returns
    -------
    statistic : float
        The computed z-score for this test.
    pvalue : float
        The p-value for the hypothesis test.

    Notes
    -----
    Valid only for n>20. This function uses the method described in [1]_.

    References
    ----------
    .. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
       statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.

    Examples
    --------
    >>> from scipy.stats import kurtosistest
    >>> kurtosistest(list(range(20)))
    KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348)
    >>> kurtosistest(list(range(20)), alternative='less')
    KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.04402169166264174)
    >>> kurtosistest(list(range(20)), alternative='greater')
    KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.9559783083373583)

    >>> rng = np.random.default_rng()
    >>> s = rng.normal(0, 1, 1000)
    >>> kurtosistest(s)
    KurtosistestResult(statistic=-1.475047944490622, pvalue=0.14019965402996987)

    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        if alternative != 'two-sided':
            raise ValueError("nan-containing/masked inputs with "
                             "nan_policy='omit' are currently not "
                             "supported by one-sided alternatives.")
        # NaN-omitting path: delegate to the masked-array implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.kurtosistest(a, axis)
    n = a.shape[axis]
    if n < 5:
        raise ValueError(
            "kurtosistest requires at least 5 observations; %i observations"
            " were given." % int(n))
    if n < 20:
        # The approximation in [1]_ is only reliable for larger samples;
        # warn but still compute.
        warnings.warn("kurtosistest only valid for n>=20 ... continuing "
                      "anyway, n=%i" % int(n))
    # Pearson kurtosis, i.e. b2 = m4 / m2**2 (no subtraction of 3).
    b2 = kurtosis(a, axis, fisher=False)
    E = 3.0*(n-1) / (n+1)
    varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))  # [1]_ Eq. 1
    x = (b2-E) / np.sqrt(varb2)  # [1]_ Eq. 4
    # [1]_ Eq. 2:
    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
                                                        (n*(n-2)*(n-3)))
    # [1]_ Eq. 3:
    A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
    term1 = 1 - 2/(9.0*A)
    denom = 1 + x*np.sqrt(2/(A-4.0))
    # A zero denominator makes the statistic undefined; emit NaN there and
    # warn below.
    term2 = np.sign(denom) * np.where(denom == 0.0, np.nan,
                                      np.power((1-2.0/A)/np.abs(denom), 1/3.0))
    if np.any(denom == 0):
        msg = "Test statistic not defined in some cases due to division by " \
              "zero. Return nan in that case..."
        warnings.warn(msg, RuntimeWarning)
    Z = (term1 - term2) / np.sqrt(2/(9.0*A))  # [1]_ Eq. 5
    # zprob uses upper tail, so Z needs to be positive
    return KurtosistestResult(*_normtest_finish(Z, alternative))
# (statistic, pvalue) result type returned by `normaltest`.
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
    """Test whether a sample differs from a normal distribution.

    This function tests the null hypothesis that a sample comes
    from a normal distribution.  It is based on D'Agostino and
    Pearson's [1]_, [2]_ test that combines skew and kurtosis to
    produce an omnibus test of normality.

    Parameters
    ----------
    a : array_like
        The array containing the sample to be tested.
    axis : int or None, optional
        Axis along which to compute test.  Default is 0.  If None,
        compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    statistic : float or array
        ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
        ``k`` is the z-score returned by `kurtosistest`.
    pvalue : float or array
       A 2-sided chi squared probability for the hypothesis test.

    References
    ----------
    .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
           moderate and large sample size", Biometrika, 58, 341-348
    .. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
           normality", Biometrika, 60, 613-622

    Examples
    --------
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> pts = 1000
    >>> a = rng.normal(0, 1, size=pts)
    >>> b = rng.normal(2, 1, size=pts)
    >>> x = np.concatenate((a, b))
    >>> k2, p = stats.normaltest(x)
    >>> alpha = 1e-3
    >>> print("p = {:g}".format(p))
    p = 8.4713e-19
    >>> if p < alpha:  # null hypothesis: x comes from a normal distribution
    ...     print("The null hypothesis can be rejected")
    ... else:
    ...     print("The null hypothesis cannot be rejected")
    The null hypothesis can be rejected

    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # NaN-omitting path: delegate to the masked-array implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.normaltest(a, axis)

    s, _ = skewtest(a, axis)
    k, _ = kurtosistest(a, axis)
    statistic = s * s + k * k
    # K^2 is asymptotically chi-squared distributed with 2 degrees of
    # freedom under the null hypothesis.
    return NormaltestResult(statistic, distributions.chi2.sf(statistic, 2))
# (statistic, pvalue) result type returned by `jarque_bera`.
Jarque_beraResult = namedtuple('Jarque_beraResult', ('statistic', 'pvalue'))
def jarque_bera(x):
    """Perform the Jarque-Bera goodness of fit test on sample data.

    The Jarque-Bera test tests whether the sample data has the skewness and
    kurtosis matching a normal distribution.

    Note that this test only works for a large enough number of data samples
    (>2000) as the test statistic asymptotically has a Chi-squared distribution
    with 2 degrees of freedom.

    Parameters
    ----------
    x : array_like
        Observations of a random variable.

    Returns
    -------
    jb_value : float
        The test statistic.
    p : float
        The p-value for the hypothesis test.

    Raises
    ------
    ValueError
        If `x` contains no observations.

    References
    ----------
    .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
           homoscedasticity and serial independence of regression residuals",
           6 Econometric Letters 255-259.

    Examples
    --------
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> x = rng.normal(0, 1, 100000)
    >>> jarque_bera_test = stats.jarque_bera(x)
    >>> jarque_bera_test.pvalue  # doctest: +SKIP
    0.18810419594996775
    """
    x = np.asarray(x)
    n = x.size
    if n == 0:
        raise ValueError('At least one observation is required.')
    mu = x.mean()
    diffx = x - mu
    # Sample skewness and (non-excess) kurtosis from biased central moments.
    skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
    kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
    jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
    # Use the survival function rather than ``1 - cdf``: for large statistics
    # ``1 - cdf`` underflows to 0 around p ~ 1e-16, while ``sf`` keeps full
    # precision in the far tail.
    p = distributions.chi2.sf(jb_value, 2)
    return Jarque_beraResult(jb_value, p)
#####################################
# FREQUENCY FUNCTIONS #
#####################################
# deindent to work around numpy/gh-16202
@np.deprecate(
    message="`itemfreq` is deprecated and will be removed in a "
            "future version. Use instead `np.unique(..., return_counts=True)`")
def itemfreq(a):
    """Return a 2-D array of item frequencies.

    Parameters
    ----------
    a : (N,) array_like
        Input array.

    Returns
    -------
    itemfreq : (K, 2) ndarray
        Two columns: the sorted unique values of `a`, and the number of
        times each value occurs.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
    >>> stats.itemfreq(a)
    array([[ 0.,  2.],
           [ 1.,  4.],
           [ 2.,  2.],
           [ 4.,  1.],
           [ 5.,  1.]])
    """
    values, inverse = np.unique(a, return_inverse=True)
    counts = np.bincount(inverse)
    # Pair each unique value with its count, one row per value.
    return np.column_stack((values, counts))
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
                      axis=None):
    """Calculate the score at a given percentile of the input sequence.

    For example, ``per=50`` yields the median. When the desired quantile
    falls between two data points, the result is interpolated according to
    `interpolation_method`.

    Parameters
    ----------
    a : array_like
        A 1-D array of values from which to extract score.
    per : array_like
        Percentile(s) at which to extract score; each value must lie in
        the range [0, 100].
    limit : tuple, optional
        If given, a ``(lower, upper)`` pair; values of `a` outside this
        closed interval are ignored.
    interpolation_method : {'fraction', 'lower', 'higher'}, optional
        How to resolve a quantile lying between data points ``i`` and
        ``j`` (default 'fraction'):

        * 'fraction': ``i + (j - i) * fraction``, linear interpolation
        * 'lower': ``i``
        * 'higher': ``j``
    axis : int, optional
        Axis along which the percentiles are computed. Default is None,
        which computes over the whole (flattened) array.

    Returns
    -------
    score : float or ndarray
        Score at percentile(s); nan(s) for an empty input.

    See Also
    --------
    percentileofscore, numpy.percentile

    Notes
    -----
    For NumPy 1.9 and higher, `numpy.percentile` provides equivalent
    functionality and is significantly faster; prefer it where available.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(100)
    >>> stats.scoreatpercentile(a, 50)
    49.5
    """
    arr = np.asarray(a)
    if arr.size == 0:
        # Empty input: mirror the shape of `per` with nan(s).
        if np.isscalar(per):
            return np.nan
        return np.full(np.asarray(per).shape, np.nan, dtype=np.float64)
    if limit:
        lower, upper = limit
        arr = arr[(lower <= arr) & (arr <= upper)]
    # Sort once; the helper handles both scalar and sequence `per`.
    sorted_values = np.sort(arr, axis=axis)
    if axis is None:
        axis = 0
    return _compute_qth_percentile(sorted_values, per, interpolation_method,
                                   axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted_, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted_, i,
interpolation_method, axis)
for i in per]
return np.array(score)
if not (0 <= per <= 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted_.ndim
idx = per / 100. * (sorted_.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted_.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
    """Compute the percentile rank of a score relative to a list of scores.

    A `percentileofscore` of, say, 80% means that 80% of the values in `a`
    lie below the given score. The exact treatment of gaps and ties is
    selected with the `kind` keyword.

    Parameters
    ----------
    a : array_like
        Array of scores to which `score` is compared.
    score : int or float
        Score that is compared to the elements in `a`.
    kind : {'rank', 'weak', 'strict', 'mean'}, optional
        Interpretation of the result (default is 'rank'):

        * 'rank': average percentage ranking of `score`; multiple matches
          average the percentage rankings of all matching values.
        * 'weak': corresponds to a cumulative distribution function — the
          percentage of values less than or equal to `score`.
        * 'strict': like 'weak', but counts only values strictly less
          than `score`.
        * 'mean': the average of the 'weak' and 'strict' results.

    Returns
    -------
    pcos : float
        Percentile-position of `score` (0-100) relative to `a`.

    See Also
    --------
    numpy.percentile

    Examples
    --------
    >>> from scipy import stats
    >>> stats.percentileofscore([1, 2, 3, 4], 3)
    75.0
    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
    40.0
    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
    80.0
    """
    if np.isnan(score):
        return np.nan
    values = np.asarray(a)
    n = len(values)
    if n == 0:
        # By convention any score outranks an empty sample.
        return 100.0
    below = np.count_nonzero(values < score)
    at_or_below = np.count_nonzero(values <= score)
    if kind == 'rank':
        # Ties contribute one extra half-step to the average ranking.
        tie_bonus = 1 if at_or_below > below else 0
        return (at_or_below + below + tie_bonus) * 50.0 / n
    if kind == 'strict':
        return below / n * 100
    if kind == 'weak':
        return at_or_below / n * 100
    if kind == 'mean':
        return (below + at_or_below) / n * 50
    raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
# Result container returned by `_histogram`:
# (count, lowerlimit, binsize, extrapoints).
HistogramResult = namedtuple('HistogramResult',
                             ('count', 'lowerlimit', 'binsize', 'extrapoints'))
def _histogram(a, numbins=10, defaultlimits=None, weights=None,
               printextras=False):
    """Create a histogram.

    Separate the range into `numbins` bins and count how many values (or
    how much weight) lands in each one.

    Parameters
    ----------
    a : array_like
        Array of scores which will be put into bins.
    numbins : int, optional
        Number of bins to use. Default is 10.
    defaultlimits : tuple (lower, upper), optional
        Range of the histogram. When omitted, a range slightly wider than
        the data range is used: ``(a.min() - s, a.max() + s)`` with
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        Weight of each value in `a`; defaults to a weight of 1.0 each.
    printextras : bool, optional
        When True and some points fall outside the bin limits, warn with
        the number of such points. Default is False.

    Returns
    -------
    count : ndarray
        Number of points (or sum of weights) in each bin.
    lowerlimit : float
        Lower limit of the first bin.
    binsize : float
        Common width of the bins (all bins have the same size).
    extrapoints : int
        Number of points outside the histogram range.

    See Also
    --------
    numpy.histogram

    Notes
    -----
    Based on numpy's histogram, but with a slightly larger default range
    when `defaultlimits` is not given.
    """
    a = np.ravel(a)
    if defaultlimits is None:
        if a.size == 0:
            # Undetermined range for empty input; fall back to [0, 1].
            defaultlimits = (0, 1)
        else:
            # Pad the data range so the extreme values fall inside bins.
            lo = a.min()
            hi = a.max()
            pad = (hi - lo) / (2. * (numbins - 1.))
            defaultlimits = (lo - pad, hi + pad)
    # numpy does the actual binning; an integer `bins` argument yields
    # fixed-width bins, so a single binsize is well defined below.
    hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
                                   weights=weights)
    # Keep the historical float output regardless of input dtype.
    hist = np.array(hist, dtype=float)
    binsize = bin_edges[1] - bin_edges[0]
    extrapoints = len([v for v in a
                       if defaultlimits[0] > v or v > defaultlimits[1]])
    if extrapoints > 0 and printextras:
        warnings.warn("Points outside given histogram range = %s"
                      % extrapoints)
    return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
# Result container returned by `cumfreq`:
# (cumcount, lowerlimit, binsize, extrapoints).
CumfreqResult = namedtuple('CumfreqResult',
                           ('cumcount', 'lowerlimit', 'binsize',
                            'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """Return a cumulative frequency histogram, using the histogram function.

    A cumulative histogram counts, for each bin, the number of observations
    that fall in that bin or in any earlier one.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        Number of bins to use for the histogram. Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        Range of the histogram; when omitted, a range slightly larger than
        the data range is used, namely ``(a.min() - s, a.max() + s)`` with
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        Weight of each value in `a`; defaults to 1.0 per value.

    Returns
    -------
    cumcount : ndarray
        Binned values of cumulative frequency.
    lowerlimit : float
        Lower real limit.
    binsize : float
        Width of each bin.
    extrapoints : int
        Number of points outside the histogram range.

    Examples
    --------
    >>> from scipy import stats
    >>> x = [1, 4, 2, 1, 3, 1]
    >>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
    >>> res.cumcount
    array([ 1.,  2.,  3.,  3.])
    >>> res.extrapoints
    3
    """
    counts, lowerlimit, binsize, extrapoints = _histogram(
        a, numbins, defaultreallimits, weights=weights)
    # Running total over the bins gives the cumulative histogram.
    return CumfreqResult(np.cumsum(counts, axis=0), lowerlimit, binsize,
                         extrapoints)
# Result container returned by `relfreq`:
# (frequency, lowerlimit, binsize, extrapoints).
RelfreqResult = namedtuple('RelfreqResult',
                           ('frequency', 'lowerlimit', 'binsize',
                            'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """Return a relative frequency histogram, using the histogram function.

    A relative frequency histogram maps the number of observations in each
    bin to the total number of observations.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        Number of bins to use for the histogram. Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        Range of the histogram; when omitted, a range slightly larger than
        the data range is used, namely ``(a.min() - s, a.max() + s)`` with
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        Weight of each value in `a`; defaults to 1.0 per value.

    Returns
    -------
    frequency : ndarray
        Binned values of relative frequency.
    lowerlimit : float
        Lower real limit.
    binsize : float
        Width of each bin.
    extrapoints : int
        Number of points outside the histogram range.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([2, 4, 1, 2, 3, 2])
    >>> res = stats.relfreq(a, numbins=4)
    >>> res.frequency
    array([ 0.16666667, 0.5       , 0.16666667,  0.16666667])
    >>> np.sum(res.frequency)  # relative frequencies should add up to 1
    1.0
    """
    a = np.asanyarray(a)
    counts, lowerlimit, binsize, extrapoints = _histogram(
        a, numbins, defaultreallimits, weights=weights)
    # Normalize each bin by the total number of observations.
    return RelfreqResult(counts / a.shape[0], lowerlimit, binsize,
                         extrapoints)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
    """Compute the O'Brien transform on input data (any number of arrays).

    Used to test for homogeneity of variance prior to running one-way
    statistics; each array in ``*args`` is one level of a factor. If
    `f_oneway` run on the transformed data is found significant, the
    variances are unequal. From Maxwell and Delaney [1]_, p.112.

    Parameters
    ----------
    args : tuple of array_like
        Any number of arrays.

    Returns
    -------
    obrientransform : ndarray
        Transformed data for use in an ANOVA. The first dimension indexes
        the input arrays. If the inputs are all 1-D of the same length,
        the result is a 2-D array; otherwise it is a 1-D object array
        whose elements are ndarrays.

    References
    ----------
    .. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
           Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.

    Examples
    --------
    >>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
    >>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
    >>> from scipy.stats import obrientransform
    >>> tx, ty = obrientransform(x, y)
    """
    eps_tol = np.sqrt(np.finfo(float).eps)
    transformed = []
    last_shape = None
    for sample in args:
        sample = np.asarray(sample)
        n = len(sample)
        centered_sq = (sample - np.mean(sample))**2
        total_sq = centered_sq.sum()
        # The O'Brien transform.
        t = ((n - 1.5) * n * centered_sq - 0.5 * total_sq) / ((n - 1) * (n - 2))
        # Sanity check: the mean of the transformed data must recover the
        # original sample variance.
        if abs(total_sq / (n - 1) - np.mean(t)) > eps_tol:
            raise ValueError('Lack of convergence in obrientransform.')
        transformed.append(t)
        last_shape = sample.shape
    if last_shape:
        # Ragged inputs cannot be stacked into a rectangular float array;
        # fall back to a 1-D object array in that case.
        for prev in transformed[:-1]:
            if prev.shape != last_shape:
                return np.array(transformed, dtype=object)
    return np.array(transformed)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
    """Compute standard error of the mean.

    Calculate the standard error of the mean (or standard error of
    measurement) of the values in the input array.

    Parameters
    ----------
    a : array_like
        An array containing the values for which the standard error is
        returned.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees-of-freedom: adjustment for bias in limited samples
        relative to the population estimate of variance. Defaults to 1.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    s : ndarray or float
        The standard error of the mean in the sample(s), along the input
        axis.

    Notes
    -----
    The default value for `ddof` differs from the default (0) used by other
    ddof-containing routines, such as np.std and np.nanstd.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(20).reshape(5,4)
    >>> stats.sem(a)
    array([ 2.8284,  2.8284,  2.8284,  2.8284])
    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Masked arrays handle the nan-ignoring variant.
        return mstats_basic.sem(ma.masked_invalid(a), axis, ddof)
    return np.std(a, axis=axis, ddof=ddof) / np.sqrt(a.shape[axis])
def _isconst(x):
"""
Check if all values in x are the same. nans are ignored.
x must be a 1d array.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([True])
else:
return (y[0] == y).all(keepdims=True)
def _quiet_nanmean(x):
"""
Compute nanmean for the 1d array x, but quietly return nan if x is all nan.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([np.nan])
else:
return np.mean(y, keepdims=True)
def _quiet_nanstd(x, ddof=0):
"""
Compute nanstd for the 1d array x, but quietly return nan if x is all nan.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([np.nan])
else:
return np.std(y, keepdims=True, ddof=ddof)
def zscore(a, axis=0, ddof=0, nan_policy='propagate'):
    """Compute the z score.

    Compute the z score of each value in the sample, relative to the
    sample mean and standard deviation.

    Parameters
    ----------
    a : array_like
        An array like object containing the sample data.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the standard
        deviation. Default is 0.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'. Note that when the
        value is 'omit', nans in the input also propagate to the output,
        but they do not affect the z-scores computed for the non-nan
        values.

    Returns
    -------
    zscore : array_like
        The z-scores, standardized by mean and standard deviation of the
        input array `a`.

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of `asarray`
    for parameters).

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([ 0.7972,  0.0767,  0.4383,  0.7866,  0.8091,
    ...                0.1954,  0.6307,  0.6599,  0.1065,  0.0508])
    >>> stats.zscore(a)
    array([ 1.1273, -1.247 , -0.0552,  1.0923,  1.1664, -0.8559,  0.5786,
            0.6748, -1.1488, -1.3324])
    """
    # z-scoring an array is just a zmap of the array against itself.
    return zmap(a, a, axis=axis, ddof=ddof, nan_policy=nan_policy)
def zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'):
    """
    Calculate the relative z-scores.

    Return an array of z-scores, i.e., scores that are standardized to
    zero mean and unit variance, where mean and variance are calculated
    from the comparison array.

    Parameters
    ----------
    scores : array_like
        The input for which z-scores are calculated.
    compare : array_like
        The input from which the mean and standard deviation of the
        normalization are taken; assumed to have the same dimension as
        `scores`.
    axis : int or None, optional
        Axis over which mean and variance of `compare` are calculated.
        Default is 0. If None, compute over the whole array `scores`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the
        standard deviation. Default is 0.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle the occurrence of nans in `compare`.
        'propagate' returns nan, 'raise' raises an exception, 'omit'
        performs the calculations ignoring nan values. Default is
        'propagate'. Note that when the value is 'omit', nans in `scores`
        also propagate to the output, but they do not affect the z-scores
        computed for the non-nan values.

    Returns
    -------
    zscore : array_like
        Z-scores, in the same shape as `scores`.

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of
    `asarray` for parameters).

    Examples
    --------
    >>> from scipy.stats import zmap
    >>> a = [0.5, 2.0, 2.5, 3]
    >>> b = [0, 1, 2, 3, 4]
    >>> zmap(a, b)
    array([-1.06066017,  0.        ,  0.35355339,  0.70710678])
    """
    a = np.asanyarray(compare)
    if a.size == 0:
        # Nothing to normalize against: return an empty array of same shape.
        return np.empty(a.shape)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # The _quiet_* helpers ignore nans and return length-1 arrays per
        # slice, so the results broadcast against `scores` the same way a
        # keepdims=True reduction would.
        if axis is None:
            mn = _quiet_nanmean(a.ravel())
            std = _quiet_nanstd(a.ravel(), ddof=ddof)
            isconst = _isconst(a.ravel())
        else:
            mn = np.apply_along_axis(_quiet_nanmean, axis, a)
            std = np.apply_along_axis(_quiet_nanstd, axis, a, ddof=ddof)
            isconst = np.apply_along_axis(_isconst, axis, a)
    else:
        mn = a.mean(axis=axis, keepdims=True)
        std = a.std(axis=axis, ddof=ddof, keepdims=True)
        if axis is None:
            isconst = (a.item(0) == a).all()
        else:
            # A slice is constant iff every element equals its first one.
            isconst = (_first(a, axis) == a).all(axis=axis, keepdims=True)
    # Set std deviations that are 0 to 1 to avoid division by 0.
    std[isconst] = 1.0
    z = (scores - mn) / std
    # Set the outputs associated with a constant input to nan.
    z[np.broadcast_to(isconst, z.shape)] = np.nan
    return z
def gstd(a, axis=0, ddof=1):
    """
    Calculate the geometric standard deviation of an array.

    The geometric standard deviation describes the spread of a set of numbers
    where the geometric mean is preferred. It is a multiplicative factor, and
    so a dimensionless quantity.

    It is defined as the exponent of the standard deviation of ``log(a)``.
    Mathematically the population geometric standard deviation can be
    evaluated as::

        gstd = exp(std(log(a)))

    .. versionadded:: 1.3.0

    Parameters
    ----------
    a : array_like
        An array like object containing the sample data.
    axis : int, tuple or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Degree of freedom correction in the calculation of the
        geometric standard deviation. Default is 1.

    Returns
    -------
    ndarray or float
        An array of the geometric standard deviation. If `axis` is None or `a`
        is a 1d array a float is returned.

    Notes
    -----
    As the calculation requires the use of logarithms the geometric standard
    deviation only supports strictly positive values. Any non-positive or
    infinite values will raise a `ValueError`.

    The geometric standard deviation is sometimes confused with the exponent
    of the standard deviation, ``exp(std(a))``. Instead the geometric standard
    deviation is ``exp(std(log(a)))``.

    The default value for `ddof` is different to the default value (0) used
    by other ddof containing functions, such as ``np.std`` and ``np.nanstd``.

    Examples
    --------
    Find the geometric standard deviation of a log-normally distributed
    sample. Note that the standard deviation of the distribution is one, on a
    log scale this evaluates to approximately ``exp(1)``.

    >>> from scipy.stats import gstd
    >>> rng = np.random.default_rng()
    >>> sample = rng.lognormal(mean=0, sigma=1, size=1000)
    >>> gstd(sample)
    2.810010162475324

    Compute the geometric standard deviation of a multidimensional array and
    of a given axis.

    >>> a = np.arange(1, 25).reshape(2, 3, 4)
    >>> gstd(a, axis=None)
    2.2944076136018947
    >>> gstd(a, axis=2)
    array([[1.82424757, 1.22436866, 1.13183117],
           [1.09348306, 1.07244798, 1.05914985]])
    >>> gstd(a, axis=(1,2))
    array([2.12939215, 1.22120169])

    The geometric standard deviation further handles masked arrays.

    >>> a = np.arange(1, 25).reshape(2, 3, 4)
    >>> ma = np.ma.masked_where(a > 16, a)
    >>> ma
    masked_array(
      data=[[[1, 2, 3, 4],
             [5, 6, 7, 8],
             [9, 10, 11, 12]],
            [[13, 14, 15, 16],
             [--, --, --, --],
             [--, --, --, --]]],
      mask=[[[False, False, False, False],
             [False, False, False, False],
             [False, False, False, False]],
            [[False, False, False, False],
             [ True,  True,  True,  True],
             [ True,  True,  True,  True]]],
      fill_value=999999)
    >>> gstd(ma, axis=2)
    masked_array(
      data=[[1.8242475707663655, 1.2243686572447428, 1.1318311657788478],
            [1.0934830582350938, --, --]],
      mask=[[False, False, False],
            [False,  True,  True]],
      fill_value=999999)
    """
    a = np.asanyarray(a)
    # Use the masked log for masked arrays so masked entries stay masked
    # instead of triggering errors.
    log = ma.log if isinstance(a, ma.MaskedArray) else np.log
    try:
        with warnings.catch_warnings():
            # Promote RuntimeWarnings (log of non-positive values, degrees of
            # freedom problems) to exceptions so they can be translated into
            # informative ValueErrors below.
            warnings.simplefilter("error", RuntimeWarning)
            return np.exp(np.std(log(a), axis=axis, ddof=ddof))
    except RuntimeWarning as w:
        # Diagnose which invalid input caused the warning.
        if np.isinf(a).any():
            raise ValueError(
                'Infinite value encountered. The geometric standard deviation '
                'is defined for strictly positive values only.'
            ) from w
        a_nan = np.isnan(a)
        a_nan_any = a_nan.any()
        # exclude NaN's from negativity check, but
        # avoid expensive masking for arrays with no NaN
        if ((a_nan_any and np.less_equal(np.nanmin(a), 0)) or
                (not a_nan_any and np.less_equal(a, 0).any())):
            raise ValueError(
                'Non positive value encountered. The geometric standard '
                'deviation is defined for strictly positive values only.'
            ) from w
        elif 'Degrees of freedom <= 0 for slice' == str(w):
            raise ValueError(w) from w
        else:
            # Remaining warnings don't need to be exceptions.
            return np.exp(np.std(log(a, where=~a_nan), axis=axis, ddof=ddof))
    except TypeError as e:
        raise ValueError(
            'Invalid array input. The inputs could not be '
            'safely coerced to any supported types') from e
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
# 'normal' rescales an IQR so it estimates the standard deviation of a
# normal distribution: IQR / (2*sqrt(2)*erfinv(1/2)) ~= IQR / 1.349.
_scale_conversions = {'raw': 1.0,
                      'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale=1.0, nan_policy='propagate',
        interpolation='linear', keepdims=False):
    r"""
    Compute the interquartile range of the data along the specified axis.

    The interquartile range (IQR) is the difference between the 75th and
    25th percentile of the data, a measure of dispersion that is much more
    robust against outliers than the standard deviation or variance [2]_.
    Other percentile ranges can be computed via ``rng``; e.g.
    ``rng=(0, 100)`` is equivalent to `numpy.ptp`.  The IQR of an empty
    array is `np.nan`.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    x : array_like
        Input array or object that can be converted to an array.
    axis : int or sequence of int, optional
        Axis along which the range is computed. The default is to
        compute the IQR for the entire array.
    rng : Two-element sequence containing floats in range of [0,100] optional
        Percentiles over which to compute the range; each must be in
        [0, 100], order does not matter. Default is the true IQR:
        ``(25, 75)``.
    scale : scalar or str, optional
        The numerical value of scale will be divided out of the final
        result. The following string values are recognized:

          * 'raw' : No scaling, just return the raw IQR.
            **Deprecated!**  Use ``scale=1`` instead.
          * 'normal' : Scale by
            :math:`2 \sqrt{2} erf^{-1}(\frac{1}{2}) \approx 1.349`.

        The default is 1.0.  Array-like scale is also allowed, as long
        as it broadcasts correctly to the output such that
        ``out / scale`` is a valid operation.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan
        (default is 'propagate'):

          * 'propagate': returns nan
          * 'raise': throws an error
          * 'omit': performs the calculations ignoring nan values
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional
        Interpolation method used when a percentile boundary lies between
        two data points `i` and `j`; passed through to the percentile
        function. Default is 'linear'.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions
        with size one, so the result broadcasts against `x`.

    Returns
    -------
    iqr : scalar or ndarray
        If ``axis=None``, a scalar is returned. If the input contains
        integers or floats of smaller precision than ``np.float64``, then
        the output data-type is ``np.float64``. Otherwise, the output
        data-type is the same as that of the input.

    See Also
    --------
    numpy.std, numpy.var

    Notes
    -----
    This function is heavily dependent on the installed numpy version;
    versions >= 1.11 are recommended for correct `nan_policy`, `keepdims`
    and `interpolation` behavior.

    References
    ----------
    .. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
    .. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale

    Examples
    --------
    >>> from scipy.stats import iqr
    >>> x = np.array([[10, 7, 4], [3, 2, 1]])
    >>> iqr(x)
    4.0
    >>> iqr(x, axis=0)
    array([ 3.5,  2.5,  1.5])
    """
    x = asarray(x)

    # This check prevents percentile from raising an error later. Also, it
    # is consistent with `np.var` and `np.std`.
    if not x.size:
        return np.nan

    # An error may be raised here, so fail-fast, before doing lengthy
    # computations, even though `scale` is not used until later
    if isinstance(scale, str):
        scale_key = scale.lower()
        if scale_key not in _scale_conversions:
            raise ValueError("{0} not a valid scale for `iqr`".format(scale))
        if scale_key == 'raw':
            warnings.warn(
                "use of scale='raw' is deprecated, use scale=1.0 instead",
                np.VisibleDeprecationWarning
                )
        scale = _scale_conversions[scale_key]

    # Select the percentile function to use based on nans and policy
    contains_nan, nan_policy = _contains_nan(x, nan_policy)

    if contains_nan and nan_policy == 'omit':
        percentile_func = np.nanpercentile
    else:
        percentile_func = np.percentile

    if len(rng) != 2:
        raise TypeError("quantile range must be two element sequence")

    if np.isnan(rng).any():
        raise ValueError("range must not contain NaNs")

    rng = sorted(rng)
    pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
                          keepdims=keepdims)
    out = np.subtract(pct[1], pct[0])

    # BUG FIX: divide unconditionally instead of guarding with
    # ``if scale != 1.0``.  The guard raised "truth value of an array is
    # ambiguous" for the documented array-like `scale`, and an out-of-place
    # division (rather than ``/=``) lets `scale` broadcast to a larger
    # shape.  Division by exactly 1.0 is a no-op for IEEE floats, so the
    # default path is unchanged.
    out = out / scale
    return out
def _mad_1d(x, center, nan_policy):
# Median absolute deviation for 1-d array x.
# This is a helper function for `median_abs_deviation`; it assumes its
# arguments have been validated already. In particular, x must be a
# 1-d numpy array, center must be callable, and if nan_policy is not
# 'propagate', it is assumed to be 'omit', because 'raise' is handled
# in `median_abs_deviation`.
# No warning is generated if x is empty or all nan.
isnan = np.isnan(x)
if isnan.any():
if nan_policy == 'propagate':
return np.nan
x = x[~isnan]
if x.size == 0:
# MAD of an empty array is nan.
return np.nan
# Edge cases have been handled, so do the basic MAD calculation.
med = center(x)
mad = np.median(np.abs(x - med))
return mad
def median_abs_deviation(x, axis=0, center=np.median, scale=1.0,
                         nan_policy='propagate'):
    r"""
    Compute the median absolute deviation of the data along the given axis.

    The median absolute deviation (MAD, [1]_) is the median of the
    absolute deviations from a central value.  It is a measure of
    dispersion similar to the standard deviation but more robust to
    outliers [2]_.  The MAD of an empty array is ``np.nan``.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    x : array_like
        Input array or object that can be converted to an array.
    axis : int or None, optional
        Axis along which the range is computed. Default is 0. If None,
        compute the MAD over the entire array.
    center : callable, optional
        A function returning the central value, with signature
        ``func(arr, axis)``.  Default is ``np.median``.  Note that this
        only changes the value the deviations are measured from; the
        *median* of those absolute deviations is always returned.
    scale : scalar or str, optional
        The final result is divided by `scale`.  Default is 1.0.  The
        string "normal" uses the inverse of the standard normal quantile
        function at 0.75 (approximately 0.67449), making the result
        consistent with the standard deviation for normal data.
        Array-like scale is allowed provided ``out / scale`` broadcasts.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan
        (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    mad : scalar or ndarray
        If ``axis=None``, a scalar is returned. If the input contains
        integers or floats of smaller precision than ``np.float64``, then
        the output data-type is ``np.float64``. Otherwise, the output
        data-type is the same as that of the input.

    See Also
    --------
    numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
    scipy.stats.tstd, scipy.stats.tvar

    Notes
    -----
    The input array may contain `inf`, but if `center` returns `inf`, the
    corresponding MAD for that data will be `nan`.

    References
    ----------
    .. [1] "Median absolute deviation",
           https://en.wikipedia.org/wiki/Median_absolute_deviation
    .. [2] "Robust measures of scale",
           https://en.wikipedia.org/wiki/Robust_measures_of_scale

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.array([[10, 7, 4], [3, 2, 1]])
    >>> stats.median_abs_deviation(x)
    array([3.5, 2.5, 1.5])
    >>> stats.median_abs_deviation(x, axis=None)
    2.0
    """
    if not callable(center):
        raise TypeError("The argument 'center' must be callable. The given "
                        f"value {repr(center)} is not callable.")

    # Fail fast on a bad `scale` before doing any heavy computation.
    if isinstance(scale, str):
        if scale.lower() == 'normal':
            scale = 0.6744897501960817  # special.ndtri(0.75)
        else:
            raise ValueError(f"{scale} is not a valid scale value.")

    x = asarray(x)

    if not x.size:
        # Consistent with `np.var` and `np.std`: nan (or an array of nan
        # with the reduced shape) for an empty input.
        if axis is None:
            return np.nan
        reduced_shape = tuple(item for i, item in enumerate(x.shape)
                              if i != axis)
        if reduced_shape == ():
            # Return nan, not array(nan)
            return np.nan
        return np.full(reduced_shape, np.nan)

    contains_nan, nan_policy = _contains_nan(x, nan_policy)

    if contains_nan:
        # Handle each 1-d slice independently so 'omit'/'propagate'
        # apply per slice.
        if axis is None:
            return _mad_1d(x.ravel(), center, nan_policy) / scale
        return np.apply_along_axis(_mad_1d, axis, x, center,
                                   nan_policy) / scale

    if axis is None:
        med = center(x, axis=None)
        mad = np.median(np.abs(x - med))
    else:
        # expand_dims makes `med` broadcast against `x` as if the
        # central value had been computed with keepdims=True.
        med = np.expand_dims(center(x, axis=axis), axis)
        mad = np.median(np.abs(x - med), axis=axis)
    return mad / scale
# Keep the top newline so that the message does not show up on the stats page
# (np.deprecate prepends this text to the wrapped function's docstring).
_median_absolute_deviation_deprec_msg = """
To preserve the existing default behavior, use
`scipy.stats.median_abs_deviation(..., scale=1/1.4826)`.
The value 1.4826 is not numerically precise for scaling
with a normal distribution. For a numerically precise value, use
`scipy.stats.median_abs_deviation(..., scale='normal')`.
"""
# Due to numpy/gh-16349 we need to unindent the entire docstring
@np.deprecate(old_name='median_absolute_deviation',
              new_name='median_abs_deviation',
              message=_median_absolute_deviation_deprec_msg)
def median_absolute_deviation(x, axis=0, center=np.median, scale=1.4826,
                              nan_policy='propagate'):
    r"""
Compute the median absolute deviation of the data along the given axis.

The median absolute deviation (MAD, [1]_) computes the median over the
absolute deviations from the median.  It is a dispersion measure similar
to the standard deviation but more robust to outliers [2]_.  The MAD of
an empty array is ``np.nan``.

Deprecated: this is a thin wrapper around `median_abs_deviation`.  The
only behavioral difference is that `scale` here is a *multiplier*
(default 1.4826, the classical normal-consistency constant), whereas
`median_abs_deviation` *divides* by its `scale`.

.. versionadded:: 1.3.0

Parameters
----------
x : array_like
    Input array or object that can be converted to an array.
axis : int or None, optional
    Axis along which the range is computed. Default is 0. If None,
    compute the MAD over the entire array.
center : callable, optional
    Function returning the central value, signature ``func(arr, axis)``.
    Default is ``np.median``.
scale : int, optional
    Multiplier applied to the MAD. The default (1.4826) ensures
    consistency with the standard deviation for normally distributed
    data.
nan_policy : {'propagate', 'raise', 'omit'}, optional
    Defines how to handle when input contains nan
    (default is 'propagate'):

    * 'propagate': returns nan
    * 'raise': throws an error
    * 'omit': performs the calculations ignoring nan values

Returns
-------
mad : scalar or ndarray
    If ``axis=None``, a scalar is returned. If the input contains
    integers or floats of smaller precision than ``np.float64``, then the
    output data-type is ``np.float64``. Otherwise, the output data-type
    is the same as that of the input.

See Also
--------
numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
scipy.stats.tstd, scipy.stats.tvar

References
----------
.. [1] "Median absolute deviation",
       https://en.wikipedia.org/wiki/Median_absolute_deviation
.. [2] "Robust measures of scale",
       https://en.wikipedia.org/wiki/Robust_measures_of_scale
"""
    if isinstance(scale, str) and scale.lower() == 'raw':
        warnings.warn(
            "use of scale='raw' is deprecated, use scale=1.0 instead",
            np.VisibleDeprecationWarning
            )
        scale = 1.0

    # Other string scales (e.g. 'normal') pass through unchanged; numeric
    # scales are inverted because `median_abs_deviation` divides by its
    # `scale` while this legacy interface multiplied.
    if not isinstance(scale, str):
        scale = 1 / scale

    return median_abs_deviation(x, axis=axis, center=center, scale=scale,
                                nan_policy=nan_policy)
#####################################
#         TRIMMING FUNCTIONS        #
#####################################

# Result type of `sigmaclip`: surviving data plus the final clip bounds.
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))


def sigmaclip(a, low=4., high=4.):
    """Perform iterative sigma-clipping of array elements.

    Starting from the raveled input, every element outside the range

        mean(c) - std(c)*low  ..  mean(c) + std(c)*high

    is discarded, and the bounds are recomputed from the survivors.
    Iteration stops once a full pass removes nothing.

    Parameters
    ----------
    a : array_like
        Data array, will be raveled if not 1-D.
    low : float, optional
        Lower bound factor of sigma clipping. Default is 4.
    high : float, optional
        Upper bound factor of sigma clipping. Default is 4.

    Returns
    -------
    clipped : ndarray
        Input array with clipped elements removed.
    lower : float
        Lower threshold value used for clipping.
    upper : float
        Upper threshold value used for clipping.

    Examples
    --------
    >>> from scipy.stats import sigmaclip
    >>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
    ...                     np.linspace(-100, -50, 3)))
    >>> c, low, upp = sigmaclip(a, 1.8, 1.8)
    >>> (c == np.linspace(9.5, 10.5, 11)).all()
    True
    """
    clipped = np.asarray(a).ravel()
    while True:
        sd = clipped.std()
        mu = clipped.mean()
        n_before = clipped.size
        lower_bound = mu - sd * low
        upper_bound = mu + sd * high
        clipped = clipped[(clipped >= lower_bound) &
                          (clipped <= upper_bound)]
        if clipped.size == n_before:
            # Nothing was removed this pass; the bounds are stable.
            break
    return SigmaclipResult(clipped, lower_bound, upper_bound)
def trimboth(a, proportiontocut, axis=0):
    """Slice off a proportion of items from both ends of an array.

    With `proportiontocut` = 0.1, the lowest 10% **and** the highest 10%
    of values are removed along `axis`.  If the proportion yields a
    non-integer count, fewer items are removed (conservative trimming).

    Parameters
    ----------
    a : array_like
        Data to trim.
    proportiontocut : float
        Proportion (in range 0-1) of total data set to trim of each end.
    axis : int or None, optional
        Axis along which to trim data. Default is 0. If None, compute
        over the whole array `a`.

    Returns
    -------
    out : ndarray
        Trimmed version of array `a`. The order of the trimmed content
        is undefined.

    See Also
    --------
    trim_mean

    Examples
    --------
    >>> from scipy import stats
    >>> stats.trimboth(np.arange(20), 0.1).shape
    (16,)
    """
    a = np.asarray(a)

    if a.size == 0:
        return a

    if axis is None:
        a = a.ravel()
        axis = 0

    n = a.shape[axis]
    cut = int(proportiontocut * n)
    keep_hi = n - cut
    if cut >= keep_hi:
        raise ValueError("Proportion too big.")

    # partition places ranks `cut` and `keep_hi - 1` correctly, so the
    # slice between them holds exactly the middle values (unordered).
    partitioned = np.partition(a, (cut, keep_hi - 1), axis)

    index = [slice(None)] * partitioned.ndim
    index[axis] = slice(cut, keep_hi)
    return partitioned[tuple(index)]
def trim1(a, proportiontocut, tail='right', axis=0):
    """Slice off a proportion from ONE end of the passed array distribution.

    If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
    10% of scores. The lowest or highest values are trimmed (depending on
    the tail).
    Slice off less if proportion results in a non-integer slice index
    (i.e. conservatively slices off `proportiontocut` ).

    Parameters
    ----------
    a : array_like
        Input array.
    proportiontocut : float
        Fraction to cut off of 'left' or 'right' of distribution.
    tail : {'left', 'right'}, optional
        Defaults to 'right'.
    axis : int or None, optional
        Axis along which to trim data. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    trim1 : ndarray
        Trimmed version of array `a`. The order of the trimmed content is
        undefined.
    """
    a = np.asarray(a)
    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]

    # avoid possible corner case
    if proportiontocut >= 1:
        return []  # historical behavior: a plain empty list

    tail = tail.lower()
    if tail == 'right':
        lowercut = 0
        uppercut = nobs - int(proportiontocut * nobs)
    elif tail == 'left':
        lowercut = int(proportiontocut * nobs)
        uppercut = nobs
    else:
        # BUG FIX: an unrecognized `tail` previously fell through to an
        # UnboundLocalError on `lowercut`; fail with a clear message.
        raise ValueError("tail must be 'left' or 'right'")

    atmp = np.partition(a, (lowercut, uppercut - 1), axis)

    # BUG FIX: the trimming slice must be taken along `axis`; previously
    # ``atmp[lowercut:uppercut]`` always sliced the first dimension,
    # returning wrong results for multidimensional input with axis != 0.
    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    return atmp[tuple(sl)]
def trim_mean(a, proportiontocut, axis=0):
    """Return mean of array after trimming distribution from both tails.

    With `proportiontocut` = 0.1, the lowest 10% and highest 10% of
    values are removed before averaging.  If the proportion yields a
    non-integer count, fewer items are removed (conservative trimming).

    Parameters
    ----------
    a : array_like
        Input array.
    proportiontocut : float
        Fraction to cut off of both tails of the distribution.
    axis : int or None, optional
        Axis along which the trimmed means are computed. Default is 0.
        If None, compute over the whole array `a`.

    Returns
    -------
    trim_mean : ndarray
        Mean of trimmed array.

    See Also
    --------
    trimboth
    tmean : Compute the trimmed mean ignoring values outside given `limits`.

    Examples
    --------
    >>> from scipy import stats
    >>> stats.trim_mean(np.arange(20), 0.1)
    9.5
    >>> x2 = np.arange(20).reshape(5, 4)
    >>> stats.trim_mean(x2, 0.25)
    array([  8.,   9.,  10.,  11.])
    >>> stats.trim_mean(x2, 0.25, axis=1)
    array([  1.5,   5.5,   9.5,  13.5,  17.5])
    """
    a = np.asarray(a)

    if a.size == 0:
        return np.nan

    if axis is None:
        a = a.ravel()
        axis = 0

    n = a.shape[axis]
    cut = int(proportiontocut * n)
    top = n - cut
    if cut > top:
        raise ValueError("Proportion too big.")

    # partition is enough: the mean does not depend on the order of the
    # kept middle values.
    partitioned = np.partition(a, (cut, top - 1), axis)

    index = [slice(None)] * partitioned.ndim
    index[axis] = slice(cut, top)
    return np.mean(partitioned[tuple(index)], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
class F_onewayConstantInputWarning(RuntimeWarning):
"""
Warning generated by `f_oneway` when an input is constant, e.g.
each of the samples provided is a constant array.
"""
def __init__(self, msg=None):
if msg is None:
msg = ("Each of the input arrays is constant;"
"the F statistic is not defined or infinite")
self.args = (msg,)
class F_onewayBadInputSizesWarning(RuntimeWarning):
"""
Warning generated by `f_oneway` when an input has length 0,
or if all the inputs have length 1.
"""
pass
def _create_f_oneway_nan_result(shape, axis):
"""
This is a helper function for f_oneway for creating the return values
in certain degenerate conditions. It creates return values that are
all nan with the appropriate shape for the given `shape` and `axis`.
"""
axis = np.core.multiarray.normalize_axis_index(axis, len(shape))
shp = shape[:axis] + shape[axis+1:]
if shp == ():
f = np.nan
prob = np.nan
else:
f = np.full(shp, fill_value=np.nan)
prob = f.copy()
return F_onewayResult(f, prob)
def _first(arr, axis):
"""Return arr[..., 0:1, ...] where 0:1 is in the `axis` position."""
return np.take_along_axis(arr, np.array(0, ndmin=arr.ndim), axis)
def f_oneway(*args, axis=0):
    """Perform one-way ANOVA.

    The one-way ANOVA tests the null hypothesis that two or more groups
    have the same population mean.  The test is applied to samples from
    two or more groups, possibly with differing sizes.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample measurements for each group. There must be at least
        two arguments. If the arrays are multidimensional, then all the
        dimensions of the array must be the same except for `axis`.
    axis : int, optional
        Axis of the input arrays along which the test is applied.
        Default is 0.

    Returns
    -------
    statistic : float
        The computed F statistic of the test.
    pvalue : float
        The associated p-value from the F distribution.

    Warns
    -----
    F_onewayConstantInputWarning
        Raised if each of the input arrays is constant.  The F statistic
        is then not defined or infinite, so ``np.inf`` or ``np.nan`` is
        returned.
    F_onewayBadInputSizesWarning
        Raised if any input has length 0, or if all inputs have length 1.
        ``np.nan`` is returned for the statistic and the p-value.

    Notes
    -----
    The p-value is only valid under the usual ANOVA assumptions:
    independent samples drawn from normally distributed populations with
    equal variances (homoscedasticity).  When those assumptions fail,
    `scipy.stats.kruskal` or `scipy.stats.alexandergovern` may still be
    applicable, with some loss of power.

    Each group must have length at least one, and at least one group must
    have length greater than one; otherwise a warning is generated and
    (``np.nan``, ``np.nan``) is returned.  If every group is internally
    constant but at least two groups differ, (``np.inf``, 0) is returned
    with a warning; if all values everywhere are identical,
    (``np.nan``, ``np.nan``) is returned with a warning.

    The algorithm is from Heiman [2]_, pp.394-7.

    References
    ----------
    .. [1] R. Lowry, "Concepts and Applications of Inferential Statistics",
           Chapter 14, 2014, http://vassarstats.net/textbook/
    .. [2] G.W. Heiman, "Understanding research methods and statistics: An
           integrated introduction for psychology", Houghton, Mifflin and
           Company, 2001.
    .. [3] G.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA.
           http://www.biostathandbook.com/onewayanova.html

    Examples
    --------
    >>> from scipy.stats import f_oneway
    >>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
    ...              0.0659, 0.0923, 0.0836]
    >>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
    ...            0.0725]
    >>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
    >>> f_oneway(tillamook, newport, petersburg)  # doctest: +SKIP
    F_onewayResult(statistic=..., pvalue=...)
    """
    if len(args) < 2:
        raise TypeError(f'at least two inputs are required; got {len(args)}.')

    samples = [np.asarray(sample, dtype=float) for sample in args]

    # ANOVA on N groups, each in its own array.
    num_groups = len(samples)

    # np.concatenate doubles as the validator here: it raises np.AxisError
    # for a bad `axis`, and ValueError when the non-axis dimensions of the
    # inputs disagree.
    alldata = np.concatenate(samples, axis=axis)
    bign = alldata.shape[axis]

    # Degenerate sizes are checked after concatenation so shape errors are
    # detected and reported first.
    if any(sample.shape[axis] == 0 for sample in samples):
        warnings.warn(F_onewayBadInputSizesWarning('at least one input '
                                                   'has length 0'))
        return _create_f_oneway_nan_result(alldata.shape, axis)

    # Must have at least one group with length greater than 1.
    if all(sample.shape[axis] == 1 for sample in samples):
        msg = ('all input arrays have length 1. f_oneway requires that at '
               'least one input has length greater than 1.')
        warnings.warn(F_onewayBadInputSizesWarning(msg))
        return _create_f_oneway_nan_result(alldata.shape, axis)

    # Constant-input detection, based on
    # https://github.com/scipy/scipy/issues/11669.
    # `is_const` marks, per group, the axis slices whose values are all
    # equal (shape (num_groups, ...) when axis=0); `all_const` marks the
    # slices where *every* group is internally constant.
    is_const = np.concatenate(
        [(_first(sample, axis) == sample).all(axis=axis, keepdims=True)
         for sample in samples],
        axis=axis)
    all_const = is_const.all(axis=axis)
    if all_const.any():
        warnings.warn(F_onewayConstantInputWarning())

    # `all_same_const` marks slices where every value in every group is
    # one and the same constant.
    all_same_const = (_first(alldata, axis) == alldata).all(axis=axis)

    # Center the data around zero before the sum-of-squares computations:
    # variance is shift-invariant, and centering vastly improves numerical
    # stability.
    offset = alldata.mean(axis=axis, keepdims=True)
    alldata -= offset

    normalized_ss = _square_of_sums(alldata, axis=axis) / bign

    # Naming: *bn = "between treatments", *wn = "within treatments".
    sstot = _sum_of_squares(alldata, axis=axis) - normalized_ss
    ssbn = 0
    for sample in samples:
        ssbn += _square_of_sums(sample - offset,
                                axis=axis) / sample.shape[axis]
    ssbn -= normalized_ss
    sswn = sstot - ssbn
    dfbn = num_groups - 1
    dfwn = bign - num_groups
    msb = ssbn / dfbn
    msw = sswn / dfwn
    with np.errstate(divide='ignore', invalid='ignore'):
        f = msb / msw

    prob = special.fdtrc(dfbn, dfwn, f)  # equivalent to stats.f.sf

    # Patch up slices whose inputs were constant: the naive formula yields
    # 0/0 there instead of the documented inf/nan results.
    if np.isscalar(f):
        if all_same_const:
            f = np.nan
            prob = np.nan
        elif all_const:
            f = np.inf
            prob = 0.0
    else:
        f[all_const] = np.inf
        prob[all_const] = 0.0
        f[all_same_const] = np.nan
        prob[all_same_const] = np.nan

    return F_onewayResult(f, prob)
def alexandergovern(*args, nan_policy='propagate'):
    """Performs the Alexander Govern test.

    The Alexander-Govern approximation tests the equality of k independent
    means in the face of heterogeneity of variance. The test is applied to
    samples from two or more groups, possibly with differing sizes.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample measurements for each group.  There must be at least two
        samples; each must be one-dimensional, finite, and contain more
        than one observation.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    statistic : float
        The computed A statistic of the test.
    pvalue : float
        The associated p-value from the chi-squared distribution.

    Warns
    -----
    AlexanderGovernConstantInputWarning
        Raised if an input is a constant array. The statistic is not defined
        in this case, so ``np.nan`` is returned.

    See Also
    --------
    f_oneway : one-way ANOVA

    Notes
    -----
    The use of this test relies on the samples being independent and each
    drawn from a normally distributed population.  Unlike `f_oneway`, this
    test does not assume homoscedasticity, relaxing the assumption of equal
    variances.

    References
    ----------
    .. [1] Alexander, Ralph A., and Diane M. Govern. "A New and Simpler
           Approximation for ANOVA under Variance Heterogeneity." Journal
           of Educational Statistics, vol. 19, no. 2, 1994, pp. 91-101.
           JSTOR, www.jstor.org/stable/1165140. Accessed 12 Sept. 2020.

    Examples
    --------
    >>> from scipy.stats import alexandergovern
    >>> atlanta = [13.75, 13.75, 13.5, 13.5, 13.0, 13.0, 13.0, 12.75, 12.5]
    >>> chicago = [14.25, 13.0, 12.75, 12.5, 12.5, 12.4, 12.3, 11.9, 11.9]
    >>> houston = [14.0, 14.0, 13.51, 13.5, 13.5, 13.25, 13.0, 12.5, 12.5]
    >>> memphis = [15.0, 14.0, 13.75, 13.59, 13.25, 12.97, 12.5, 12.25,
    ...           11.89]
    >>> alexandergovern(atlanta, chicago, houston, memphis)
    AlexanderGovernResult(statistic=4.65087071883494,
                          pvalue=0.19922132490385214)
    """
    samples = _alexandergovern_input_validation(args, nan_policy)

    # The statistic is undefined when any group has zero variance.
    if np.any([(sample == sample[0]).all() for sample in samples]):
        warnings.warn(AlexanderGovernConstantInputWarning())
        return AlexanderGovernResult(np.nan, np.nan)

    # Formula numbers below refer to the equations on page 92 of
    # Alexander & Govern (1994).  Formulas (5)-(7) describe other tests that
    # underlie equation (8) but are not needed to perform the test.

    # Group sizes (masked counts when NaNs are omitted) and group means.
    if nan_policy == 'omit':
        ns = np.array([ma.count(sample) for sample in samples])
    else:
        ns = np.array([len(sample) for sample in samples])
    means = np.array([np.mean(sample) for sample in samples])

    # (1) standard error of each sample mean
    se = np.array([np.std(sample, ddof=1) / np.sqrt(size)
                   for sample, size in zip(samples, ns)])

    # (2) weights: precisions (inverse squared SEs) normalized to sum to 1
    precisions = 1 / np.square(se)
    weights = precisions / np.sum(precisions)

    # (3) variance-weighted estimate of the common mean
    common_mean = np.sum(weights * means)

    # (4) one-sample t statistic for each group
    t = (means - common_mean) / se

    # Helper quantities for the normalizing transformation.
    df = ns - 1
    a = df - .5
    b = 48 * a**2
    c = (a * np.log(1 + t**2 / df))**.5

    # (8) perform a normalizing transformation on each t statistic
    z = (c + ((c**3 + 3*c)/b) -
         ((4*c**7 + 33*c**5 + 240*c**3 + 855*c) /
          (10*b**2 + 8*b*c**4 + 1000*b)))

    # (9) the statistic is the sum of squared z values; "[the p value is
    # determined from] central chi-square random deviates with k - 1 degrees
    # of freedom". Alexander, Govern (94)
    A = np.sum(np.square(z))
    p = distributions.chi2.sf(A, len(samples) - 1)
    return AlexanderGovernResult(A, p)
def _alexandergovern_input_validation(args, nan_policy):
    """Validate the samples passed to `alexandergovern`.

    Coerces each sample to a 1-D float ndarray (or a masked array when the
    sample contains NaNs and the effective policy is ``'omit'``).  Raises
    for fewer than two samples, samples with at most one element, non-1-D
    samples, or infinite values.
    """
    if len(args) < 2:
        raise TypeError(f"2 or more inputs required, got {len(args)}")

    samples = [np.asarray(sample, dtype=float) for sample in args]
    for index, sample in enumerate(samples):
        if np.size(sample) <= 1:
            raise ValueError("Input sample size must be greater than one.")
        if sample.ndim != 1:
            raise ValueError("Input samples must be one-dimensional")
        if np.isinf(sample).any():
            raise ValueError("Input samples must be finite.")
        # `_contains_nan` returns a (possibly updated) policy; it is carried
        # into the checks for the remaining samples.
        has_nan, nan_policy = _contains_nan(sample, nan_policy=nan_policy)
        if has_nan and nan_policy == 'omit':
            samples[index] = ma.masked_invalid(sample)
    return samples
# Result of `alexandergovern`: the A statistic and its chi-squared p-value.
AlexanderGovernResult = make_dataclass("AlexanderGovernResult",
                                       ["statistic", "pvalue"])
class AlexanderGovernConstantInputWarning(RuntimeWarning):
    """Warning generated by `alexandergovern` when an input is constant."""

    def __init__(self, msg=None):
        # Substitute a standard message when the caller supplies none.
        default = "An input array is constant; the statistic is not defined."
        self.args = ((default if msg is None else msg),)
class PearsonRConstantInputWarning(RuntimeWarning):
    """Warning generated by `pearsonr` when an input is constant."""

    def __init__(self, msg=None):
        if msg is None:
            # Fixed spelling: "coefficent" -> "coefficient".
            msg = ("An input array is constant; the correlation coefficient "
                   "is not defined.")
        self.args = (msg,)
class PearsonRNearConstantInputWarning(RuntimeWarning):
    """Warning generated by `pearsonr` when an input is nearly constant."""

    def __init__(self, msg=None):
        if msg is None:
            # Fixed spelling: "coefficent" -> "coefficient".
            msg = ("An input array is nearly constant; the computed "
                   "correlation coefficient may be inaccurate.")
        self.args = (msg,)
def pearsonr(x, y):
    r"""
    Pearson correlation coefficient and p-value for testing non-correlation.

    The Pearson correlation coefficient [1]_ measures the linear relationship
    between two datasets.  The calculation of the p-value relies on the
    assumption that each dataset is normally distributed.  Like other
    correlation coefficients, this one varies between -1 and +1 with 0
    implying no correlation.  Correlations of -1 or +1 imply an exact linear
    relationship.

    Parameters
    ----------
    x : (N,) array_like
        Input array.
    y : (N,) array_like
        Input array.

    Returns
    -------
    r : float
        Pearson's correlation coefficient.
    p-value : float
        Two-tailed p-value.

    Warns
    -----
    PearsonRConstantInputWarning
        Raised if an input is a constant array.  The correlation coefficient
        is not defined in this case, so ``np.nan`` is returned.
    PearsonRNearConstantInputWarning
        Raised if an input is "nearly" constant.  The array ``x`` is
        considered nearly constant if
        ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.  Numerical errors in
        the calculation ``x - mean(x)`` in this case might result in an
        inaccurate calculation of r.

    See Also
    --------
    spearmanr : Spearman rank-order correlation coefficient.
    kendalltau : Kendall's tau, a correlation measure for ordinal data.

    Notes
    -----
    Under the null hypothesis of zero population correlation, the sample
    correlation coefficient r follows a beta distribution on the interval
    [-1, 1] with equal shape parameters ``a = b = n/2 - 1`` ([1]_, [2]_).
    The two-sided p-value is therefore ``2*dist.cdf(-abs(r))`` where::

        dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)

    When n is 2 the only possible values of r are 1 and -1, so the
    two-sided p-value is always 1.

    References
    ----------
    .. [1] "Pearson correlation coefficient", Wikipedia,
           https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
    .. [2] Student, "Probable error of a correlation coefficient",
           Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
    .. [3] C. J. Kowalski, "On the Effects of Non-Normality on the
           Distribution of the Sample Product-Moment Correlation
           Coefficient" Journal of the Royal Statistical Society.
           Series C (Applied Statistics), Vol. 21, No. 1 (1972), pp. 1-12.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([0, 0, 0, 1, 1, 1, 1])
    >>> b = np.arange(7)
    >>> stats.pearsonr(a, b)
    (0.8660254037844386, 0.011724811003954649)
    >>> stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
    (-0.7426106572325057, 0.1505558088534455)
    """
    n = len(x)
    if n != len(y):
        raise ValueError('x and y must have the same length.')
    if n < 2:
        raise ValueError('x and y must have length at least 2.')

    x = np.asarray(x)
    y = np.asarray(y)

    # A constant input has zero variance, so r is undefined.
    if (x == x[0]).all() or (y == y[0]).all():
        warnings.warn(PearsonRConstantInputWarning())
        return np.nan, np.nan

    # Working precision: at least 64-bit float, possibly higher (e.g.
    # np.longdouble) depending on the inputs.
    dtype = type(1.0 + x[0] + y[0])

    if n == 2:
        # Two points determine r exactly (+/-1); p-value is always 1.
        return dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0])), 1.0

    mean_x = x.mean(dtype=dtype)
    mean_y = y.mean(dtype=dtype)

    # Using `astype(dtype)` guarantees the centered values are computed in
    # at least 64-bit floating point.
    dev_x = x.astype(dtype) - mean_x
    dev_y = y.astype(dtype) - mean_y

    # scipy.linalg.norm, unlike sqrt((dev*dev).sum()), does not overflow
    # for inputs such as [-5e210, 5e210, 3e200, -3e200].
    norm_x = linalg.norm(dev_x)
    norm_y = linalg.norm(dev_y)

    threshold = 1e-13
    if norm_x < threshold*abs(mean_x) or norm_y < threshold*abs(mean_y):
        # Catastrophic cancellation in x - mean(x) can make r inaccurate
        # when all values sit very close to the mean.
        warnings.warn(PearsonRNearConstantInputWarning())

    r = np.dot(dev_x/norm_x, dev_y/norm_y)

    # Any |r| > 1 is a small floating-point artifact; clamp it.
    r = min(max(r, -1.0), 1.0)

    # p = 2*dist.cdf(-abs(r)) for the beta distribution on [-1, 1] with
    # shape parameters a = b = n/2 - 1.  `special.btdtr` is the beta CDF on
    # [0, 1]; the substitution x = (r + 1)/2 maps -abs(r) to
    # 0.5*(1 - abs(r)) without changing the shape parameters.  r is cast to
    # float64 because btdtr raises a TypeError for higher precision floats.
    shape = n/2 - 1
    prob = 2*special.btdtr(shape, shape, 0.5*(1 - abs(np.float64(r))))
    return r, prob
def fisher_exact(table, alternative='two-sided'):
    """Perform a Fisher exact test on a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table. Elements should be non-negative integers.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
        The following options are available (default is 'two-sided'):

        * 'two-sided'
        * 'less': one-sided
        * 'greater': one-sided

    Returns
    -------
    oddsratio : float
        This is prior odds ratio and not a posterior estimate.
    p_value : float
        P-value, the probability of obtaining a distribution at least as
        extreme as the one that was actually observed, assuming that the
        null hypothesis is true.

    See Also
    --------
    chi2_contingency : Chi-square test of independence of variables in a
        contingency table.
    barnard_exact : Barnard's exact test, which is a more powerful alternative
        than Fisher's exact test for 2x2 contingency tables.

    Notes
    -----
    The calculated odds ratio is different from the one R uses. This scipy
    implementation returns the (more common) "unconditional Maximum
    Likelihood Estimate", while R uses the "conditional Maximum Likelihood
    Estimate".

    For tables with large numbers, the (inexact) chi-square test implemented
    in the function `chi2_contingency` can also be used.

    Examples
    --------
    Say we spend a few days counting whales and sharks in the Atlantic and
    Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
    Indian ocean 2 whales and 5 sharks. Then our contingency table is::

                Atlantic  Indian
        whales     8        2
        sharks     1        5

    We use this table to find the p-value:

    >>> import scipy.stats as stats
    >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
    >>> pvalue
    0.0349...

    The probability that we would observe this or an even more imbalanced ratio
    by chance is about 3.5%. A commonly used significance level is 5%--if we
    adopt that, we can therefore conclude that our observed imbalance is
    statistically significant; whales prefer the Atlantic while sharks prefer
    the Indian ocean.
    """
    hypergeom = distributions.hypergeom
    # int32 is not enough for the algorithm
    c = np.asarray(table, dtype=np.int64)
    if not c.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")
    if np.any(c < 0):
        raise ValueError("All values in `table` must be nonnegative.")
    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
        # If both values in a row or column are zero, the p-value is 1 and
        # the odds ratio is NaN.
        return np.nan, 1.0
    # Sample (unconditional MLE) odds ratio; infinite when a denominator
    # cell is zero.
    if c[1, 0] > 0 and c[0, 1] > 0:
        oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
    else:
        oddsratio = np.inf
    # Marginal totals: n1 and n2 are the row sums, n is the first column
    # sum.  With margins fixed, c[0, 0] follows a hypergeometric
    # distribution with parameters (n1 + n2, n1, n) under the null.
    n1 = c[0, 0] + c[0, 1]
    n2 = c[1, 0] + c[1, 1]
    n = c[0, 0] + c[1, 0]

    def binary_search(n, n1, n2, side):
        """Binary search for where to begin halves in two-sided test."""
        # NOTE: this closure reads `mode`, `pexact` and `epsilon` from the
        # enclosing scope; they are assigned in the 'two-sided' branch below
        # before this helper is ever called.
        if side == "upper":
            minval = mode
            maxval = n
        else:
            minval = 0
            maxval = mode
        guess = -1
        # Search for the value on this side of the mode whose pmf is
        # (approximately) equal to pexact, the pmf of the observed table.
        while maxval - minval > 1:
            if maxval == minval + 1 and guess == minval:
                guess = maxval
            else:
                guess = (maxval + minval) // 2
            pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
            if side == "upper":
                ng = guess - 1
            else:
                ng = guess + 1
            if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
                break
            elif pguess < pexact:
                maxval = guess
            else:
                minval = guess
        if guess == -1:
            guess = minval
        # Nudge the guess so that pmf(guess) agrees with pexact within the
        # relative tolerance `epsilon`, to absorb floating-point fuzz.
        if side == "upper":
            while guess > 0 and \
                    hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess -= 1
            while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess += 1
        else:
            while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess += 1
            while guess > 0 and \
                    hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess -= 1
        return guess

    if alternative == 'less':
        pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
    elif alternative == 'greater':
        # Same formula as the 'less' case, but with the second column.
        pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1])
    elif alternative == 'two-sided':
        # Two-sided p-value: sum the probabilities of all tables whose pmf
        # is at most that of the observed table (up to the tolerance
        # epsilon), accumulating both tails around the distribution's mode.
        mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
        pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n)
        pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
        epsilon = 1 - 1e-4
        if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
            # The observed table is (essentially) the most probable one, so
            # every table counts as at least as extreme.
            return oddsratio, 1.
        elif c[0, 0] < mode:
            plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
            if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
                # The opposite tail contributes nothing.
                return oddsratio, plower
            guess = binary_search(n, n1, n2, "upper")
            pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
        else:
            pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n)
            if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
                # The opposite tail contributes nothing.
                return oddsratio, pupper
            guess = binary_search(n, n1, n2, "lower")
            pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
    else:
        msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
        raise ValueError(msg)
    # Guard against p-values marginally above 1 due to rounding.
    pvalue = min(pvalue, 1.0)
    return oddsratio, pvalue
class SpearmanRConstantInputWarning(RuntimeWarning):
    """Warning generated by `spearmanr` when an input is constant."""

    def __init__(self, msg=None):
        if msg is None:
            # Fixed spelling: "coefficent" -> "coefficient".
            msg = ("An input array is constant; the correlation coefficient "
                   "is not defined.")
        self.args = (msg,)
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate',
              alternative='two-sided'):
    """Calculate a Spearman correlation coefficient with associated p-value.

    The Spearman rank-order correlation coefficient is a nonparametric measure
    of the monotonicity of the relationship between two datasets. Unlike the
    Pearson correlation, the Spearman correlation does not assume that both
    datasets are normally distributed. Like other correlation coefficients,
    this one varies between -1 and +1 with 0 implying no correlation.
    Correlations of -1 or +1 imply an exact monotonic relationship. Positive
    correlations imply that as x increases, so does y. Negative correlations
    imply that as x increases, y decreases.

    The p-value roughly indicates the probability of an uncorrelated system
    producing datasets that have a Spearman correlation at least as extreme
    as the one computed from these datasets. The p-values are not entirely
    reliable but are probably reasonable for datasets larger than 500 or so.

    Parameters
    ----------
    a, b : 1D or 2D array_like, b is optional
        One or two 1-D or 2-D arrays containing multiple variables and
        observations. When these are 1-D, each represents a vector of
        observations of a single variable. For the behavior in the 2-D case,
        see under ``axis``, below.
        Both arrays need to have the same length in the ``axis`` dimension.
    axis : int or None, optional
        If axis=0 (default), then each column represents a variable, with
        observations in the rows. If axis=1, the relationship is transposed:
        each row represents a variable, while the columns contain observations.
        If axis=None, then both arrays will be raveled.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
        The following options are available:

        * 'two-sided': the correlation is nonzero
        * 'less': the correlation is negative (less than zero)
        * 'greater': the correlation is positive (greater than zero)

        .. versionadded:: 1.7.0

    Returns
    -------
    correlation : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters. Correlation matrix is square with
        length equal to total number of variables (columns or rows) in ``a``
        and ``b`` combined.
    pvalue : float
        The p-value for a hypothesis test whose null hypothesis
        is that two sets of data are uncorrelated. See `alternative` above
        for alternative hypotheses. `pvalue` has the same
        shape as `correlation`.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
       Section  14.7

    Examples
    --------
    >>> from scipy import stats
    >>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
    SpearmanrResult(correlation=0.82078..., pvalue=0.08858...)
    >>> rng = np.random.default_rng()
    >>> x2n = rng.standard_normal((100, 2))
    >>> y2n = rng.standard_normal((100, 2))
    >>> stats.spearmanr(x2n)
    SpearmanrResult(correlation=-0.07960396039603959, pvalue=0.4311168705769747)
    >>> stats.spearmanr(x2n[:,0], x2n[:,1])
    SpearmanrResult(correlation=-0.07960396039603959, pvalue=0.4311168705769747)
    >>> rho, pval = stats.spearmanr(x2n, y2n)
    >>> rho
    array([[ 1.        , -0.07960396, -0.08314431,  0.09662166],
           [-0.07960396,  1.        , -0.14448245,  0.16738074],
           [-0.08314431, -0.14448245,  1.        ,  0.03234323],
           [ 0.09662166,  0.16738074,  0.03234323,  1.        ]])
    >>> pval
    array([[0.        , 0.43111687, 0.41084066, 0.33891628],
           [0.43111687, 0.        , 0.15151618, 0.09600687],
           [0.41084066, 0.15151618, 0.        , 0.74938561],
           [0.33891628, 0.09600687, 0.74938561, 0.        ]])
    >>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
    >>> rho
    array([[ 1.        , -0.07960396, -0.08314431,  0.09662166],
           [-0.07960396,  1.        , -0.14448245,  0.16738074],
           [-0.08314431, -0.14448245,  1.        ,  0.03234323],
           [ 0.09662166,  0.16738074,  0.03234323,  1.        ]])
    >>> stats.spearmanr(x2n, y2n, axis=None)
    SpearmanrResult(correlation=0.044981624540613524, pvalue=0.5270803651336189)
    >>> stats.spearmanr(x2n.ravel(), y2n.ravel())
    SpearmanrResult(correlation=0.044981624540613524, pvalue=0.5270803651336189)
    >>> rng = np.random.default_rng()
    >>> xint = rng.integers(10, size=(100, 2))
    >>> stats.spearmanr(xint)
    SpearmanrResult(correlation=0.09800224850707953, pvalue=0.3320271757932076)
    """
    if axis is not None and axis > 1:
        raise ValueError("spearmanr only handles 1-D or 2-D arrays, "
                         "supplied axis argument {}, please use only "
                         "values 0, 1 or None for axis".format(axis))
    a, axisout = _chk_asarray(a, axis)
    if a.ndim > 2:
        raise ValueError("spearmanr only handles 1-D or 2-D arrays")
    if b is None:
        if a.ndim < 2:
            raise ValueError("`spearmanr` needs at least 2 "
                             "variables to compare")
    else:
        # Concatenate a and b, so that we now only have to handle the case
        # of a 2-D `a`.
        b, _ = _chk_asarray(b, axis)
        if axisout == 0:
            a = np.column_stack((a, b))
        else:
            a = np.row_stack((a, b))
    # n_vars: number of variables; n_obs: observations per variable.
    n_vars = a.shape[1 - axisout]
    n_obs = a.shape[axisout]
    if n_obs <= 1:
        # Handle empty arrays or single observations.
        return SpearmanrResult(np.nan, np.nan)
    # NOTE(review): only the first two variables are checked for constancy
    # below; a constant variable at index >= 2 is not detected here —
    # confirm whether that is intentional.
    if axisout == 0:
        if (a[:, 0][0] == a[:, 0]).all() or (a[:, 1][0] == a[:, 1]).all():
            # If an input is constant, the correlation coefficient
            # is not defined.
            warnings.warn(SpearmanRConstantInputWarning())
            return SpearmanrResult(np.nan, np.nan)
    else:  # case when axisout == 1 b/c a is 2 dim only
        if (a[0, :][0] == a[0, :]).all() or (a[1, :][0] == a[1, :]).all():
            # If an input is constant, the correlation coefficient
            # is not defined.
            warnings.warn(SpearmanRConstantInputWarning())
            return SpearmanrResult(np.nan, np.nan)
    a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
    variable_has_nan = np.zeros(n_vars, dtype=bool)
    if a_contains_nan:
        if nan_policy == 'omit':
            # Delegate NaN-omitting computation to the masked-array
            # implementation.
            return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy,
                                          alternative=alternative)
        elif nan_policy == 'propagate':
            if a.ndim == 1 or n_vars <= 2:
                return SpearmanrResult(np.nan, np.nan)
            else:
                # Keep track of variables with NaNs, set the outputs to NaN
                # only for those variables
                variable_has_nan = np.isnan(a).any(axis=axisout)
    # Spearman's rho is the Pearson correlation of the ranks.
    a_ranked = np.apply_along_axis(rankdata, axisout, a)
    rs = np.corrcoef(a_ranked, rowvar=axisout)
    dof = n_obs - 2  # degrees of freedom
    # rs can have elements equal to 1, so avoid zero division warnings
    with np.errstate(divide='ignore'):
        # clip the small negative values possibly caused by rounding
        # errors before taking the square root
        t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0))
    # `_ttest_finish` (module helper) converts the t statistics into
    # p-values for the requested alternative.
    t, prob = _ttest_finish(dof, t, alternative)
    # For backwards compatibility, return scalars when comparing 2 columns
    if rs.shape == (2, 2):
        return SpearmanrResult(rs[1, 0], prob[1, 0])
    else:
        rs[variable_has_nan, :] = np.nan
        rs[:, variable_has_nan] = np.nan
        return SpearmanrResult(rs, prob)
# Return type of `pointbiserialr`: the correlation and its p-value.
PointbiserialrResult = namedtuple('PointbiserialrResult',
                                  ['correlation', 'pvalue'])
def pointbiserialr(x, y):
    r"""Calculate a point biserial correlation coefficient and its p-value.

    The point biserial correlation is used to measure the relationship
    between a binary variable, x, and a continuous variable, y. Like other
    correlation coefficients, this one varies between -1 and +1 with 0
    implying no correlation. Correlations of -1 or +1 imply a determinative
    relationship.

    Parameters
    ----------
    x : array_like of bools
        Input array.
    y : array_like
        Input array.

    Returns
    -------
    correlation : float
        R value.
    pvalue : float
        Two-sided p-value.

    Notes
    -----
    `pointbiserialr` uses a t-test with ``n-1`` degrees of freedom.
    It is equivalent to `pearsonr`.

    The value of the point-biserial correlation can be calculated from:

    .. math::

        r_{pb} = \frac{\overline{Y_{1}} -
                 \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{1} N_{2}}{N (N - 1))}}

    Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
    observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
    are number of observations coded 0 and 1 respectively; :math:`N` is the
    total number of observations and :math:`s_{y}` is the standard
    deviation of all the metric observations.

    A value of :math:`r_{pb}` that is significantly different from zero is
    completely equivalent to a significant difference in means between the two
    groups. Thus, an independent groups t Test with :math:`N-2` degrees of
    freedom may be used to test whether :math:`r_{pb}` is nonzero. The
    relation between the t-statistic for comparing two independent groups and
    :math:`r_{pb}` is given by:

    .. math::

        t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}

    References
    ----------
    .. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
           Statist., Vol. 20, no.1, pp. 125-126, 1949.
    .. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
           Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol.
           25, np. 3, pp. 603-607, 1954.
    .. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef:
           Statistics Reference Online (eds N. Balakrishnan, et al.), 2014.
           :doi:`10.1002/9781118445112.stat06227`

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([0, 0, 0, 1, 1, 1, 1])
    >>> b = np.arange(7)
    >>> stats.pointbiserialr(a, b)
    (0.8660254037844386, 0.011724811003954652)
    >>> stats.pearsonr(a, b)
    (0.86602540378443871, 0.011724811003954626)
    >>> np.corrcoef(a, b)
    array([[ 1.       ,  0.8660254],
           [ 0.8660254,  1.       ]])
    """
    # With the binary variable coded 0/1, the point-biserial r coincides
    # with Pearson's r, so delegate the whole computation to `pearsonr`.
    return PointbiserialrResult(*pearsonr(x, y))
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate',
method='auto', variant='b'):
"""Calculate Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, and values close to -1
indicate strong disagreement. This implements two variants of Kendall's
tau: tau-b (the default) and tau-c (also known as Stuart's tau-c). These
differ only in how they are normalized to lie within the range -1 to 1;
the hypothesis tests (their p-values) are identical. Kendall's original
tau-a is not implemented separately because both tau-b and tau-c reduce
to tau-a in the absence of ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they
will be flattened to 1-D.
initial_lexsort : bool, optional
Unused (deprecated).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
method : {'auto', 'asymptotic', 'exact'}, optional
Defines which method is used to calculate the p-value [5]_.
The following options are available (default is 'auto'):
* 'auto': selects the appropriate method based on a trade-off
between speed and accuracy
* 'asymptotic': uses a normal approximation valid for large samples
* 'exact': computes the exact p-value, but can only be used if no ties
are present. As the sample size increases, the 'exact' computation
time may grow and the result may lose some precision.
variant: {'b', 'c'}, optional
Defines which variant of Kendall's tau is returned. Default is 'b'.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See Also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
weightedtau : Computes a weighted version of Kendall's tau.
Notes
-----
The definition of Kendall's tau that is used is [2]_::
tau_b = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
tau_c = 2 (P - Q) / (n**2 * (m - 1) / m)
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U. n is the total number of samples, and m is the
number of unique values in either `x` or `y`, whichever is smaller.
References
----------
.. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
Vol. 30, No. 1/2, pp. 81-93, 1938.
.. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
.. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
Wiley & Sons, 1967.
.. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
tables", Software: Practice and Experience, Vol. 24, No. 3,
pp. 327-336, 1994.
.. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
Charles Griffin & Co., 1970.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.2827454599327748
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same "
f"size, found x-size {x.size} and y-size {y.size}")
elif not x.size or not y.size:
# Return NaN if arrays are empty
return KendalltauResult(np.nan, np.nan)
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
if variant == 'b':
return mstats_basic.kendalltau(x, y, method=method, use_ties=True)
else:
raise ValueError("Only variant 'b' is supported for masked arrays")
if initial_lexsort is not None: # deprecate to drop!
warnings.warn('"initial_lexsort" is gone!')
def count_rank_tie(ranks):
cnt = np.bincount(ranks).astype('int64', copy=False)
cnt = cnt[cnt > 1]
return ((cnt * (cnt - 1) // 2).sum(),
(cnt * (cnt - 1.) * (cnt - 2)).sum(),
(cnt * (cnt - 1.) * (2*cnt + 5)).sum())
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
dis = _kendall_dis(x, y) # discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie, x0, x1 = count_rank_tie(x) # ties in x, stats
ytie, y0, y1 = count_rank_tie(y) # ties in y, stats
tot = (size * (size - 1)) // 2
if xtie == tot or ytie == tot:
return KendalltauResult(np.nan, np.nan)
# Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
# = con + dis + xtie + ytie - ntie
con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
if variant == 'b':
tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
elif variant == 'c':
minclasses = min(len(set(x)), len(set(y)))
tau = 2*con_minus_dis / (size**2 * (minclasses-1)/minclasses)
else:
raise ValueError(f"Unknown variant of the method chosen: {variant}. "
"variant must be 'b' or 'c'.")
# Limit range to fix computational errors
tau = min(1., max(-1., tau))
# The p-value calculation is the same for all variants since the p-value
# depends only on con_minus_dis.
if method == 'exact' and (xtie != 0 or ytie != 0):
raise ValueError("Ties found, exact method cannot be used.")
if method == 'auto':
if (xtie == 0 and ytie == 0) and (size <= 33 or
min(dis, tot-dis) <= 1):
method = 'exact'
else:
method = 'asymptotic'
if xtie == 0 and ytie == 0 and method == 'exact':
pvalue = mstats_basic._kendall_p_exact(size, min(dis, tot-dis))
elif method == 'asymptotic':
# con_minus_dis is approx normally distributed with this variance [3]_
m = size * (size - 1.)
var = ((m * (2*size + 5) - x1 - y1) / 18 +
(2 * xtie * ytie) / m + x0 * y0 / (9 * m * (size - 2)))
pvalue = (special.erfc(np.abs(con_minus_dis) /
np.sqrt(var) / np.sqrt(2)))
else:
raise ValueError(f"Unknown method {method} specified. Use 'auto', "
"'exact' or 'asymptotic'.")
return KendalltauResult(tau, pvalue)
# Named result tuple returned by `weightedtau`; `pvalue` is always NaN.
WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue'))
def weightedtau(x, y, rank=True, weigher=None, additive=True):
    r"""Compute a weighted version of Kendall's :math:`\tau`.

    The weighted :math:`\tau` is a version of Kendall's :math:`\tau` in
    which exchanges of high weight count more than exchanges of low
    weight.  The default parameters compute the additive hyperbolic
    version of the index, :math:`\tau_\mathrm h`, which has been shown
    to balance important and unimportant elements well [1]_.

    Weights are derived from a rank array (nonnegative ranks, with 0 the
    highest possible rank) and a weigher function mapping each rank to a
    nonnegative weight; an exchange weighs the sum (or product) of the
    weights of the two exchanged ranks.  If, as usually happens, no
    external rank is given, the result averages the values obtained
    using the decreasing lexicographical rank by (`x`, `y`) and by
    (`y`, `x`).  Note that this ranking convention (lower value = more
    important) is opposite to that used by other SciPy statistical
    functions.

    Parameters
    ----------
    x, y : array_like
        Arrays of scores, of the same shape.  If arrays are not 1-D,
        they will be flattened to 1-D.
    rank : array_like of ints or bool, optional
        A nonnegative rank assigned to each element.  If it is None, the
        decreasing lexicographical rank by (`x`, `y`) will be used.  If
        it is False, the element indices will be used directly as ranks.
        The default is True, in which case this function returns the
        average of the values obtained using the decreasing
        lexicographical rank by (`x`, `y`) and by (`y`, `x`).
    weigher : callable, optional
        The weigher function.  Must map nonnegative integers (zero
        representing the most important element) to a nonnegative
        weight.  The default, None, provides hyperbolic weighing, that
        is, rank :math:`r` is mapped to weight :math:`1/(r+1)`.
    additive : bool, optional
        If True (default), the weight of an exchange is the sum of the
        weights of the ranks of the exchanged elements; otherwise the
        weights are multiplied.

    Returns
    -------
    correlation : float
        The weighted :math:`\tau` correlation index.
    pvalue : float
        Presently ``np.nan``, as the null statistics is unknown (even in
        the additive hyperbolic case).

    See Also
    --------
    kendalltau : Calculates Kendall's tau.
    spearmanr : Calculates a Spearman rank-order correlation coefficient.

    Notes
    -----
    This function uses an :math:`O(n \log n)`, mergesort-based algorithm
    [1]_ that is a weighted extension of Knight's algorithm for
    Kendall's :math:`\tau` [2]_.  NaNs are considered the smallest
    possible score.

    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] Sebastiano Vigna, "A weighted correlation index for rankings
           with ties", Proceedings of the 24th international conference
           on World Wide Web, pp. 1166-1176, ACM, 2015.
    .. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau
           with Ungrouped Data", Journal of the American Statistical
           Association, Vol. 61, No. 314, Part 1, pp. 436-439, 1966.

    Examples
    --------
    >>> from scipy import stats
    >>> x = [12, 2, 1, 12, 2]
    >>> y = [1, 4, 7, 1, 0]
    >>> tau, p_value = stats.weightedtau(x, y)
    >>> tau
    -0.56694968153682723
    >>> p_value
    nan
    """
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()

    if x.size != y.size:
        raise ValueError(
            "All inputs to `weightedtau` must be of the same size, "
            "found x-size %s and y-size %s" % (x.size, y.size))
    if not x.size:
        # Correlation of empty inputs is undefined.
        return WeightedTauResult(np.nan, np.nan)

    # NaNs cannot be ranked directly; remap such arrays to int64 codes
    # (NaN is treated as the smallest possible score).
    if np.isnan(np.sum(x)):
        x = _toint64(x)
    if np.isnan(np.sum(y)):
        y = _toint64(y)

    # The low-level kernel supports only a few dtypes; reduce anything
    # else (or mismatched dtypes) to int64 rank codes.
    if x.dtype != y.dtype:
        if x.dtype != np.int64:
            x = _toint64(x)
        if y.dtype != np.int64:
            y = _toint64(y)
    elif x.dtype not in (np.int32, np.int64, np.float32, np.float64):
        x = _toint64(x)
        y = _toint64(y)

    if rank is True:
        # Average the two decreasing-lexicographical-rank variants.
        forward = _weightedrankedtau(x, y, None, weigher, additive)
        backward = _weightedrankedtau(y, x, None, weigher, additive)
        return WeightedTauResult((forward + backward) / 2, np.nan)

    if rank is False:
        # Element indices double as ranks.
        rank = np.arange(x.size, dtype=np.intp)
    elif rank is not None:
        rank = np.asarray(rank).ravel()
        if rank.size != x.size:
            raise ValueError(
                "All inputs to `weightedtau` must be of the same size, "
                "found x-size %s and rank-size %s" % (x.size, rank.size))

    return WeightedTauResult(
        _weightedrankedtau(x, y, rank, weigher, additive), np.nan)
# FROM MGCPY: https://github.com/neurodata/mgcpy
class _ParallelP:
    """Picklable callable that computes one permuted MGC statistic.

    Instances carry the data matrices and a list of per-replication
    random states so each (possibly multiprocessing) worker can draw an
    independent sample of the permutation null distribution.
    """

    def __init__(self, x, y, random_states):
        self.x = x
        self.y = y
        self.random_states = random_states

    def __call__(self, index):
        # Permute rows and columns of y with the same random order so it
        # remains a valid distance matrix, then recompute the statistic.
        rng = self.random_states[index]
        order = rng.permutation(self.y.shape[0])
        permy = self.y[order][:, order]
        return _mgc_stat(self.x, permy)[0]
def _perm_test(x, y, stat, reps=1000, workers=-1, random_state=None):
    r"""Estimate the permutation p-value of an MGC statistic.

    Parameters
    ----------
    x, y : ndarray
        `x` and `y` have shapes `(n, p)` and `(n, q)`.
    stat : float
        The sample test statistic.
    reps : int, optional
        The number of replications used to estimate the null when using
        the permutation test.  The default is 1000 replications.
    workers : int or map-like callable, optional
        If an int, the replications are evaluated in parallel with a
        ``multiprocessing`` pool of that many workers (-1 uses all cores
        available to the process).  Alternatively, a map-like callable,
        such as ``multiprocessing.Pool.map``, invoked as
        ``workers(func, iterable)``; ``func`` must be pickleable.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional
        Seed or generator controlling the permutations.

    Returns
    -------
    pvalue : float
        The sample test p-value.
    null_dist : list
        The approximated null distribution.
    """
    # Pre-draw one independent RandomState per replication so workers
    # produce reproducible, non-overlapping permutation streams.
    random_state = check_random_state(random_state)
    seeds = (rng_integers(random_state, 1 << 32, size=4, dtype=np.uint32)
             for _ in range(reps))
    random_states = [np.random.RandomState(seed) for seed in seeds]

    # Fan the replications out over the requested workers.
    parallelp = _ParallelP(x=x, y=y, random_states=random_states)
    with MapWrapper(workers) as mapwrapper:
        null_dist = np.array(list(mapwrapper(parallelp, range(reps))))

    # One-sided p-value: fraction of permuted stats at least as extreme.
    pvalue = (null_dist >= stat).sum() / reps

    # A bootstrapped p-value of exactly 0 is incorrect; clamp it to the
    # resolution of the test.
    if pvalue == 0:
        pvalue = 1 / reps

    return pvalue, null_dist
def _euclidean_dist(x):
return cdist(x, x)
# Named result tuple returned by `multiscale_graphcorr`.
MGCResult = namedtuple('MGCResult', ('stat', 'pvalue', 'mgc_dict'))
def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000,
                         workers=1, is_twosamp=False, random_state=None):
    r"""Computes the Multiscale Graph Correlation (MGC) test statistic.

    For each point, MGC finds the :math:`k`-nearest neighbors for one
    property and the :math:`l`-nearest neighbors for the other [1]_; the
    pair :math:`(k, l)` is called the "scale".  MGC computes the local
    distance correlations for all scales and reports the smoothed
    optimal one, which reveals both whether the two data modalities are
    related and how that determination was made.  Characterizations of
    this implementation have been derived from and benchmarked within
    [2]_.

    Parameters
    ----------
    x, y : ndarray
        If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where
        `n` is the number of samples and `p` and `q` are the number of
        dimensions, then the MGC independence test will be run.
        Alternatively, ``x`` and ``y`` can have shapes ``(n, n)`` if
        they are distance or similarity matrices, and
        ``compute_distance`` must be sent to ``None``.  If ``x`` and
        ``y`` have shapes ``(n, p)`` and ``(m, p)``, an unpaired
        two-sample MGC test will be run.
    compute_distance : callable, optional
        A function of the form ``compute_distance(x)`` computing the
        pairwise distances within a data matrix.  Set to ``None`` if
        ``x`` and ``y`` are already distance matrices.  The default uses
        the euclidean norm metric.
    reps : int, optional
        The number of replications used to estimate the null when using
        the permutation test.  The default is ``1000``.
    workers : int or map-like callable, optional
        If an int, the p-value permutations are evaluated in parallel
        with a ``multiprocessing`` pool of that many workers (``-1``
        uses all available cores).  Alternatively, a map-like callable
        invoked as ``workers(func, iterable)``; ``func`` must be
        pickleable.  The default is ``1``.
    is_twosamp : bool, optional
        If `True`, a two sample test will be run (forced to `True` when
        ``x`` and ``y`` have shapes ``(n, p)`` and ``(m, p)``).  Will
        not run if inputs are distance matrices.  Default is ``False``.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional
        Seed or generator controlling the permutations.

    Returns
    -------
    stat : float
        The sample MGC test statistic within `[-1, 1]`.
    pvalue : float
        The p-value obtained via permutation.
    mgc_dict : dict
        Contains additional useful returns under the keys ``mgc_map``
        (a 2D representation of the latent geometry of the
        relationship), ``opt_scale`` (the estimated optimal scale as a
        `(x, y)` pair) and ``null_dist`` (the null distribution derived
        from the permuted matrices).

    See Also
    --------
    pearsonr : Pearson correlation coefficient and p-value for testing
               non-correlation.
    kendalltau : Calculates Kendall's tau.
    spearmanr : Calculates a Spearman rank-order correlation coefficient.

    Notes
    -----
    MGC requires at least 5 samples to run with reliable results and can
    handle high-dimensional data sets.  See [1]_ and [3]_ for the
    algorithm and [4]_ for the reduction of the two-sample testing
    problem to independence testing.

    .. versionadded:: 1.4.0

    References
    ----------
    .. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E.,
           Maggioni, M., & Shen, C. (2019). Discovering and deciphering
           relationships across disparate data modalities. ELife.
    .. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A.,
           Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T.
           (2019). mgcpy: A Comprehensive High Dimensional Independence
           Testing Python Package. :arXiv:`1907.02088`
    .. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From
           distance correlation to multiscale graph correlation. Journal
           of the American Statistical Association.
    .. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of
           Distance and Kernel Methods for Hypothesis Testing.
           :arXiv:`1806.05514`

    Examples
    --------
    >>> from scipy.stats import multiscale_graphcorr
    >>> x = np.arange(100)
    >>> y = x
    >>> stat, pvalue, _ = multiscale_graphcorr(x, y, workers=-1)
    >>> '%.1f, %.3f' % (stat, pvalue)
    '1.0, 0.001'
    """
    if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
        raise ValueError("x and y must be ndarrays")

    # Promote 1-D inputs to column vectors; anything above 2-D is invalid.
    if x.ndim == 1:
        x = x[:, np.newaxis]
    elif x.ndim != 2:
        raise ValueError("Expected a 2-D array `x`, found shape "
                         "{}".format(x.shape))
    if y.ndim == 1:
        y = y[:, np.newaxis]
    elif y.ndim != 2:
        raise ValueError("Expected a 2-D array `y`, found shape "
                         "{}".format(y.shape))

    nx, px = x.shape
    ny, py = y.shape

    # NaNs and infinities are rejected outright.
    _contains_nan(x, nan_policy='raise')
    _contains_nan(y, nan_policy='raise')
    if np.sum(np.isinf(x)) > 0 or np.sum(np.isinf(y)) > 0:
        raise ValueError("Inputs contain infinities")

    if nx != ny:
        if px != py:
            raise ValueError("Shape mismatch, x and y must have shape [n, p] "
                             "and [n, q] or have shape [n, p] and [m, p].")
        # Unequal sample counts with equal dimension: fall back to the
        # unpaired two-sample formulation.
        is_twosamp = True

    if nx < 5 or ny < 5:
        raise ValueError("MGC requires at least 5 samples to give reasonable "
                         "results.")

    x = x.astype(np.float64)
    y = y.astype(np.float64)

    if not callable(compute_distance) and compute_distance is not None:
        raise ValueError("Compute_distance must be a function.")

    # Small replication counts make the permutation p-value unreliable.
    if not isinstance(reps, int) or reps < 0:
        raise ValueError("Number of reps must be an integer greater than 0.")
    elif reps < 1000:
        msg = ("The number of replications is low (under 1000), and p-value "
               "calculations may be unreliable. Use the p-value result, with "
               "caution!")
        warnings.warn(msg, RuntimeWarning)

    if is_twosamp:
        if compute_distance is None:
            raise ValueError("Cannot run if inputs are distance matrices")
        # Reduce the two-sample problem to an independence test.
        x, y = _two_sample_transform(x, y)

    if compute_distance is not None:
        # Turn data matrices into distance matrices.
        x = compute_distance(x)
        y = compute_distance(y)

    # Observed statistic, map and optimal scale.
    stat, stat_dict = _mgc_stat(x, y)
    stat_mgc_map = stat_dict["stat_mgc_map"]
    opt_scale = stat_dict["opt_scale"]

    # Permutation p-value.
    pvalue, null_dist = _perm_test(x, y, stat, reps=reps, workers=workers,
                                   random_state=random_state)

    mgc_dict = {"mgc_map": stat_mgc_map,
                "opt_scale": opt_scale,
                "null_dist": null_dist}
    return MGCResult(stat, pvalue, mgc_dict)
def _mgc_stat(distx, disty):
    r"""Helper function that calculates the MGC stat. See above for use.

    Parameters
    ----------
    distx, disty : ndarray
        Distance (or data) matrices passed on to `_local_correlations`.

    Returns
    -------
    stat : float
        The sample MGC test statistic within `[-1, 1]`.
    stat_dict : dict
        Contains additional useful returns containing the following keys:

            - stat_mgc_map : ndarray
                MGC-map of the statistics.
            - opt_scale : (float, float)
                The estimated optimal scale as a `(x, y)` pair.
    """
    # calculate MGC map and optimal scale
    stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')
    n, m = stat_mgc_map.shape
    if m == 1 or n == 1:
        # the global scale is the statistic calculated at maximal nearest
        # neighbors. There is not enough local scale to search over, so
        # default to global scale.
        # NOTE(review): ``stat_mgc_map[m - 1][n - 1]`` indexes rows with
        # ``m - 1`` even though the shape was unpacked as ``n, m``; this
        # only avoids an IndexError when the map is square or 1x1 —
        # confirm the intended index order.
        stat = stat_mgc_map[m - 1][n - 1]
        opt_scale = m * n
    else:
        samp_size = len(distx) - 1
        # threshold to find connected region of significant local correlations
        sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)
        # maximum within the significant region
        stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)
    stat_dict = {"stat_mgc_map": stat_mgc_map,
                 "opt_scale": opt_scale}
    return stat, stat_dict
def _threshold_mgc_map(stat_mgc_map, samp_size):
r"""
Finds a connected region of significance in the MGC-map by thresholding.
Parameters
----------
stat_mgc_map : ndarray
All local correlations within `[-1,1]`.
samp_size : int
The sample size of original data.
Returns
-------
sig_connect : ndarray
A binary matrix with 1's indicating the significant region.
"""
m, n = stat_mgc_map.shape
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance. Threshold is based on a beta
# approximation.
per_sig = 1 - (0.02 / samp_size) # Percentile to consider as significant
threshold = samp_size * (samp_size - 3)/4 - 1/2 # Beta approximation
threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1
# the global scale at is the statistic calculated at maximial nearest
# neighbors. Threshold is the maximium on the global and local scales
threshold = max(threshold, stat_mgc_map[m - 1][n - 1])
# find the largest connected component of significant correlations
sig_connect = stat_mgc_map > threshold
if np.sum(sig_connect) > 0:
sig_connect, _ = measurements.label(sig_connect)
_, label_counts = np.unique(sig_connect, return_counts=True)
# skip the first element in label_counts, as it is count(zeros)
max_label = np.argmax(label_counts[1:]) + 1
sig_connect = sig_connect == max_label
else:
sig_connect = np.array([[False]])
return sig_connect
def _smooth_mgc_map(sig_connect, stat_mgc_map):
"""Finds the smoothed maximal within the significant region R.
If area of R is too small it returns the last local correlation. Otherwise,
returns the maximum within significant_connected_region.
Parameters
----------
sig_connect: ndarray
A binary matrix with 1's indicating the significant region.
stat_mgc_map: ndarray
All local correlations within `[-1, 1]`.
Returns
-------
stat : float
The sample MGC statistic within `[-1, 1]`.
opt_scale: (float, float)
The estimated optimal scale as an `(x, y)` pair.
"""
m, n = stat_mgc_map.shape
# the global scale at is the statistic calculated at maximial nearest
# neighbors. By default, statistic and optimal scale are global.
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = [m, n]
if np.linalg.norm(sig_connect) != 0:
# proceed only when the connected region's area is sufficiently large
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance
if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n):
max_corr = max(stat_mgc_map[sig_connect])
# find all scales within significant_connected_region that maximize
# the local correlation
max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect)
if max_corr >= stat:
stat = max_corr
k, l = max_corr_index
one_d_indices = k * n + l # 2D to 1D indexing
k = np.max(one_d_indices) // n
l = np.max(one_d_indices) % n
opt_scale = [k+1, l+1] # adding 1s to match R indexing
return stat, opt_scale
def _two_sample_transform(u, v):
"""Helper function that concatenates x and y for two sample MGC stat.
See above for use.
Parameters
----------
u, v : ndarray
`u` and `v` have shapes `(n, p)` and `(m, p)`.
Returns
-------
x : ndarray
Concatenate `u` and `v` along the `axis = 0`. `x` thus has shape
`(2n, p)`.
y : ndarray
Label matrix for `x` where 0 refers to samples that comes from `u` and
1 refers to samples that come from `v`. `y` thus has shape `(2n, 1)`.
"""
nx = u.shape[0]
ny = v.shape[0]
x = np.concatenate([u, v], axis=0)
y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1)
return x, y
#####################################
#      INFERENTIAL STATISTICS       #
#####################################
# Named result tuple returned by `ttest_1samp`.
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate',
                alternative="two-sided"):
    """Calculate the T-test for the mean of ONE group of scores.

    Tests the null hypothesis that the expected value (mean) of a sample
    of independent observations `a` is equal to the given population
    mean, `popmean`.

    Parameters
    ----------
    a : array_like
        Sample observation.
    popmean : float or array_like
        Expected value in null hypothesis. If array_like, then it must
        have the same shape as `a` excluding the axis dimension.
    axis : int or None, optional
        Axis along which to compute test; default is 0. If None, compute
        over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan ('propagate'
        returns nan, 'raise' throws an error, 'omit' performs the
        calculations ignoring nan values). Default is 'propagate'.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis; default is 'two-sided'.

        .. versionadded:: 1.6.0

    Returns
    -------
    statistic : float or array
        t-statistic.
    pvalue : float or array
        Two-sided p-value.

    Examples
    --------
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50, 2),
    ...                      random_state=rng)
    >>> stats.ttest_1samp(rvs, 5.0)  # doctest: +SKIP
    Ttest_1sampResult(statistic=array([-2.09794637, -1.75977004]),
                      pvalue=array([0.04108952, 0.08468867]))
    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)

    if contains_nan and nan_policy == 'omit':
        # The masked-array implementation only supports the two-sided
        # alternative.
        if alternative != 'two-sided':
            raise ValueError("nan-containing/masked inputs with "
                             "nan_policy='omit' are currently not "
                             "supported by one-sided alternatives.")
        a = ma.masked_invalid(a)
        return mstats_basic.ttest_1samp(a, popmean, axis)

    n = a.shape[axis]
    df = n - 1

    # t = (mean - popmean) / (s / sqrt(n)) with the sample std (ddof=1).
    d = np.mean(a, axis) - popmean
    v = np.var(a, axis, ddof=1)
    denom = np.sqrt(v / n)

    # Zero variance yields inf/nan rather than a runtime warning.
    with np.errstate(divide='ignore', invalid='ignore'):
        t = np.divide(d, denom)

    return Ttest_1sampResult(*_ttest_finish(df, t, alternative))
def _ttest_finish(df, t, alternative):
"""Common code between all 3 t-test functions."""
if alternative == 'less':
prob = distributions.t.cdf(t, df)
elif alternative == 'greater':
prob = distributions.t.sf(t, df)
elif alternative == 'two-sided':
prob = 2 * distributions.t.sf(np.abs(t), df)
else:
raise ValueError("alternative must be "
"'less', 'greater' or 'two-sided'")
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df, alternative):
    # t statistic for a difference of means; 0/0 warnings are silenced
    # because a zero denominator should yield inf/nan, not a warning.
    diff = mean1 - mean2
    with np.errstate(divide='ignore', invalid='ignore'):
        t = np.divide(diff, denom)
    return tuple(_ttest_finish(df, t, alternative))
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
# Named result tuple shared by the independent-sample t-test functions.
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
                         equal_var=True, alternative="two-sided"):
    r"""T-test for means of two independent samples from descriptive statistics.

    This is a two-sided test for the null hypothesis that two independent
    samples have identical average (expected) values.

    Parameters
    ----------
    mean1, mean2 : array_like
        The mean(s) of samples 1 and 2.
    std1, std2 : array_like
        The standard deviation(s) of samples 1 and 2.
    nobs1, nobs2 : array_like
        The number(s) of observations of samples 1 and 2.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_.  If False, perform
        Welch's t-test, which does not assume equal population variance
        [2]_.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis (default is 'two-sided').

        .. versionadded:: 1.6.0

    Returns
    -------
    statistic : float or array
        The calculated t-statistics.
    pvalue : float or array
        The two-tailed p-value.

    See Also
    --------
    scipy.stats.ttest_ind

    Notes
    -----
    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
    """
    mean1, mean2 = np.asarray(mean1), np.asarray(mean2)
    var1 = np.asarray(std1)**2
    var2 = np.asarray(std2)**2
    # Pick the pooled-variance or Welch denominator/df computation.
    denom_func = (_equal_var_ttest_denom if equal_var
                  else _unequal_var_ttest_denom)
    df, denom = denom_func(var1, nobs1, var2, nobs2)
    t, prob = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative)
    return Ttest_indResult(t, prob)
def _ttest_nans(a, b, axis, namedtuple_type):
    """
    Build an all-nan result of `namedtuple_type` for zero-size t-test input.

    Used by `ttest_ind` and `ttest_rel` when either input has size 0.
    The result shape is obtained by dropping `axis` from the shapes of
    `a` and `b` and broadcasting the remainder; an empty (0-d) shape
    yields plain ``nan`` scalars instead of arrays.
    """
    out_shape = _broadcast_shapes_with_dropped_axis(a, b, axis)
    if len(out_shape) == 0:
        stat = np.nan
        pval = np.nan
    else:
        stat = np.full(out_shape, fill_value=np.nan)
        pval = stat.copy()
    return namedtuple_type(stat, pval)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate',
              permutations=None, random_state=None, alternative="two-sided",
              trim=0):
    """Calculate the T-test for the means of *two independent* samples of scores.

    This is a two-sided test for the null hypothesis that 2 independent
    samples have identical average (expected) values.  This test assumes
    that the populations have identical variances by default.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int or None, optional
        Axis along which to compute test.  If None, compute over the
        whole arrays, `a`, and `b`.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_.  If False, perform
        Welch's t-test, which does not assume equal population variance
        [2]_.

        .. versionadded:: 0.11.0
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle input NaNs (default 'propagate').  The
        'omit' option is not currently available for permutation tests,
        one-sided asymptotic tests, or trimmed tests.
    permutations : int or None (default), optional
        If given, p-values are estimated from this many random
        permutations of the pooled data [3]_; when it equals or exceeds
        the number of distinct permutations an exact test is performed.
        If None (default), the t-distribution is used.

        .. versionadded:: 1.7.0
    random_state : {None, int, `numpy.random.Generator`,
            `numpy.random.RandomState`}, optional
        Pseudorandom number generator state used to generate
        permutations (used only when `permutations` is not None).

        .. versionadded:: 1.7.0
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis (default is 'two-sided').

        .. versionadded:: 1.6.0
    trim : float, optional
        If nonzero, performs a trimmed (Yuen's) t-test [4]_, removing
        ``floor(trim * n)`` elements from each tail of both samples.
        Valid range is [0, .5).  Trimming is recommended when the
        underlying distribution is long-tailed or contaminated with
        outliers.

        .. versionadded:: 1.7

    Returns
    -------
    statistic : float or array
        The calculated t-statistic.
    pvalue : float or array
        The two-tailed p-value.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
    .. [3] http://en.wikipedia.org/wiki/Resampling_%28statistics%29
    .. [4] Yuen, Karen K. "The Two-Sample Trimmed t for Unequal Population
           Variances." Biometrika, vol. 61, no. 1, 1974, pp. 165-170.
    """
    if not (0 <= trim < .5):
        raise ValueError("Trimming percentage should be 0 <= `trim` < .5.")
    a, b, axis = _chk2_asarray(a, b, axis)

    # NaN handling: an 'omit' policy reported for either input wins and
    # routes the call to the masked-array implementation, which only
    # supports the plain two-sided, untrimmed, non-permutation test.
    a_has_nan, a_policy = _contains_nan(a, nan_policy)
    b_has_nan, b_policy = _contains_nan(b, nan_policy)
    if 'omit' in (a_policy, b_policy):
        nan_policy = 'omit'
    if (a_has_nan or b_has_nan) and nan_policy == 'omit':
        if permutations or alternative != 'two-sided' or trim != 0:
            raise ValueError("nan-containing/masked inputs with "
                             "nan_policy='omit' are currently not "
                             "supported by permutation tests, one-sided "
                             "asymptotic tests, or trimmed tests.")
        return mstats_basic.ttest_ind(ma.masked_invalid(a),
                                      ma.masked_invalid(b),
                                      axis, equal_var)

    if a.size == 0 or b.size == 0:
        # Degenerate input: statistic and p-value are all-nan.
        return _ttest_nans(a, b, axis, Ttest_indResult)

    if permutations:
        if trim != 0:
            raise ValueError("Permutations are currently not supported "
                             "with trimming.")
        if int(permutations) != permutations or permutations < 0:
            raise ValueError("Permutations must be a positive integer.")
        stat_pval = _permutation_ttest(a, b, permutations=permutations,
                                       axis=axis, equal_var=equal_var,
                                       nan_policy=nan_policy,
                                       random_state=random_state,
                                       alternative=alternative)
    else:
        if trim == 0:
            n1, n2 = a.shape[axis], b.shape[axis]
            v1, m1 = np.var(a, axis, ddof=1), np.mean(a, axis)
            v2, m2 = np.var(b, axis, ddof=1), np.mean(b, axis)
        else:
            v1, m1, n1 = _ttest_trim_var_mean_len(a, trim, axis)
            v2, m2, n2 = _ttest_trim_var_mean_len(b, trim, axis)
        denom_func = (_equal_var_ttest_denom if equal_var
                      else _unequal_var_ttest_denom)
        df, denom = denom_func(v1, n1, v2, n2)
        stat_pval = _ttest_ind_from_stats(m1, m2, denom, df, alternative)

    return Ttest_indResult(*stat_pval)
def _ttest_trim_var_mean_len(a, trim, axis):
    """Winsorized variance, trimmed mean, and trimmed length along `axis`.

    Helper for `ttest_ind` when ``trim > 0``.  The derivation in Yuen
    (1974), Section 1, assumes ordered observations, so the input is
    sorted first.
    """
    a = np.sort(a, axis=axis)
    n_total = a.shape[axis]
    # Number of observations winsorized/trimmed from each tail,
    # converted from the fractional `trim`.
    g = int(n_total * trim)
    var_w = _calculate_winsorized_variance(a, g, axis)
    # Effective sample size after dropping g elements from both ends.
    n_trimmed = n_total - 2 * g
    # g-times trimmed mean, as defined in Yuen (1974) eq. (1-1).
    mean_t = trim_mean(a, trim, axis=axis)
    return var_w, mean_t, n_trimmed
def _calculate_winsorized_variance(a, g, axis):
"""Calculates g-times winsorized variance along specified axis"""
# it is expected that the input `a` is sorted along the correct axis
if g == 0:
return np.var(a, ddof=1, axis=axis)
# move the intended axis to the end that way it is easier to manipulate
a_win = np.moveaxis(a, axis, -1)
# save where NaNs are for later use.
nans_indices = np.any(np.isnan(a_win), axis=-1)
# Winsorization and variance calculation are done in one step in [4]
# (1-3), but here winsorization is done first; replace the left and
# right sides with the repeating value. This can be see in effect in (
# 1-3) in [4], where the leftmost and rightmost tails are replaced with
# `(g + 1) * x_{g + 1}` on the left and `(g + 1) * x_{n - g}` on the
# right. Zero-indexing turns `g + 1` to `g`, and `n - g` to `- g - 1` in
# array indexing.
a_win[..., :g] = a_win[..., [g]]
a_win[..., -g:] = a_win[..., [-g - 1]]
# Determine the variance. In [4], the degrees of freedom is expressed as
# `h - 1`, where `h = n - 2g` (unnumbered equations in Section 1, end of
# page 369, beginning of page 370). This is converted to NumPy's format,
# `n - ddof` for use with with `np.var`. The result is converted to an
# array to accommodate indexing later.
var_win = np.asarray(np.var(a_win, ddof=(2 * g + 1), axis=-1))
# with `nan_policy='propagate'`, NaNs may be completely trimmed out
# because they were sorted into the tail of the array. In these cases,
# replace computed variances with `np.nan`.
var_win[nans_indices] = np.nan
return var_win
def _broadcast_concatenate(xs, axis):
"""Concatenate arrays along an axis with broadcasting."""
# move the axis we're concatenating along to the end
xs = [np.swapaxes(x, axis, -1) for x in xs]
# determine final shape of all but the last axis
shape = np.broadcast(*[x[..., 0] for x in xs]).shape
# broadcast along all but the last axis
xs = [np.broadcast_to(x, shape + (x.shape[-1],)) for x in xs]
# concatenate along last axis
res = np.concatenate(xs, axis=-1)
# move the last axis back to where it was
res = np.swapaxes(res, axis, -1)
return res
def _data_permutations(data, n, axis=-1, random_state=None):
"""
Vectorized permutation of data, assumes `random_state` is already checked.
"""
random_state = check_random_state(random_state)
if axis < 0: # we'll be adding a new dimension at the end
axis = data.ndim + axis
# prepare permutation indices
m = data.shape[axis]
n_max = float_factorial(m) # number of distinct permutations
if n < n_max:
indices = np.array([random_state.permutation(m) for i in range(n)]).T
else:
n = n_max
indices = np.array(list(permutations(range(m)))).T
data = data.swapaxes(axis, -1) # so we can index along a new dimension
data = data[..., indices] # generate permutations
data = data.swapaxes(-2, axis) # restore original axis order
data = np.moveaxis(data, -1, 0) # permutations indexed along axis 0
return data, n
def _calc_t_stat(a, b, equal_var, axis=-1):
    """t statistic of `a` vs `b` along `axis`, pooled or Welch denominator."""
    n_a, n_b = a.shape[axis], b.shape[axis]
    mean_a, mean_b = np.mean(a, axis=axis), np.mean(b, axis=axis)
    var_a = np.var(a, axis=axis, ddof=1)
    var_b = np.var(b, axis=axis, ddof=1)
    denom_func = (_equal_var_ttest_denom if equal_var
                  else _unequal_var_ttest_denom)
    # [1] selects the denominator; the df element is unused here.
    return (mean_a - mean_b) / denom_func(var_a, n_a, var_b, n_b)[1]
def _permutation_ttest(a, b, permutations, axis=0, equal_var=True,
                       nan_policy='propagate', random_state=None,
                       alternative="two-sided"):
    """Permutation-based two-sample t-test (backend of `ttest_ind`).

    The null distribution of the statistic is built by permuting the
    pooled observations rather than relying on the asymptotic
    t-distribution, so no normality assumption is needed.  Only called
    from `ttest_ind` when `permutations` is not None.

    Parameters
    ----------
    a, b : array_like
        Broadcastable arrays, except along `axis` (the zeroth, by
        default).
    permutations : int
        Number of permutations used to estimate the p-value; an exact
        test is performed when it reaches the number of distinct
        permutations.
    axis : int, optional
        The axis over which to operate on `a` and `b`.
    equal_var : bool, optional
        If True, use the pooled-variance statistic; if False, use the
        Welch (unequal-variance) statistic.
    random_state : {None, int, `numpy.random.Generator`}, optional
        Source of randomness for the permutations.

    Returns
    -------
    statistic : float or array
        The observed t-statistic.
    pvalue : float or array
        The permutation p-value.
    """
    random_state = check_random_state(random_state)
    observed = _calc_t_stat(a, b, equal_var, axis=axis)

    # Pool both samples along `axis` (moved last), permute the pooled
    # data, and split each permutation back into two pseudo-samples.
    size_a = a.shape[axis]
    pooled = _broadcast_concatenate((a, b), axis=axis)
    pooled = np.moveaxis(pooled, axis, -1)
    permuted, permutations = _data_permutations(pooled, n=permutations,
                                                random_state=random_state)
    null_stats = _calc_t_stat(permuted[..., :size_a],
                              permuted[..., size_a:], equal_var)

    as_extreme = {
        "less": np.less_equal,
        "greater": np.greater_equal,
        "two-sided": lambda x, y: (x <= -np.abs(y)) | (x >= np.abs(y)),
    }[alternative]
    pvalues = as_extreme(null_stats, observed).sum(axis=0) / permutations

    # NaNs flow through the statistic automatically but must be copied
    # into the p-values by hand.
    if nan_policy == 'propagate' and np.isnan(observed).any():
        if np.ndim(pvalues) == 0:
            pvalues = np.float64(np.nan)
        else:
            pvalues[np.isnan(observed)] = np.nan
    return (observed, pvalues)
def _get_len(a, axis, msg):
try:
n = a.shape[axis]
except IndexError:
raise np.AxisError(axis, a.ndim, msg) from None
return n
# Result type of `ttest_rel`: the t statistic and its p-value.
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate', alternative="two-sided"):
    """Calculate the t-test on TWO RELATED samples of scores, a and b.

    This is a two-sided test for the null hypothesis that 2 related or
    repeated samples have identical average (expected) values.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape.
    axis : int or None, optional
        Axis along which to compute test.  If None, compute over the
        whole arrays, `a`, and `b`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle input NaNs (default 'propagate').
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis (default is 'two-sided').

        .. versionadded:: 1.6.0

    Returns
    -------
    statistic : float or array
        t-statistic.
    pvalue : float or array
        Two-sided p-value.

    Notes
    -----
    Typical uses are paired measurements on the same units, e.g. scores
    of the same students across two exams.  Small p-values (large
    absolute t-statistics) are evidence against the null hypothesis of
    identical average scores.

    References
    ----------
    https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples
    """
    a, b, axis = _chk2_asarray(a, b, axis)

    # NaN handling: an 'omit' policy from either input wins and routes
    # the call to the masked-array implementation (two-sided only).
    a_has_nan, a_policy = _contains_nan(a, nan_policy)
    b_has_nan, b_policy = _contains_nan(b, nan_policy)
    if 'omit' in (a_policy, b_policy):
        nan_policy = 'omit'
    if (a_has_nan or b_has_nan) and nan_policy == 'omit':
        if alternative != 'two-sided':
            raise ValueError("nan-containing/masked inputs with "
                             "nan_policy='omit' are currently not "
                             "supported by one-sided alternatives.")
        a = ma.masked_invalid(a)
        b = ma.masked_invalid(b)
        # Pair-wise deletion: a NaN in either sample masks the pair.
        joint_mask = ma.mask_or(ma.getmask(a), ma.getmask(b))
        aa = ma.array(a, mask=joint_mask, copy=True)
        bb = ma.array(b, mask=joint_mask, copy=True)
        return mstats_basic.ttest_rel(aa, bb, axis)

    n = _get_len(a, axis, "first argument")
    if n != _get_len(b, axis, "second argument"):
        raise ValueError('unequal length arrays')
    if n == 0:
        return _ttest_nans(a, b, axis, Ttest_relResult)

    df = n - 1
    diffs = (a - b).astype(np.float64)
    se = np.sqrt(np.var(diffs, axis, ddof=1) / n)
    with np.errstate(divide='ignore', invalid='ignore'):
        t = np.divide(np.mean(diffs, axis), se)
    t, prob = _ttest_finish(df, t, alternative)
    return Ttest_relResult(t, prob)
# Map from names to lambda_ values used in power_divergence().
# See Cressie & Read (1984): lambda_=1 is Pearson's chi-square,
# lambda_=0 the G-test, and lambda_=2/3 the power the authors recommend.
_power_div_lambda_names = {
    "pearson": 1,
    "log-likelihood": 0,
    "freeman-tukey": -0.5,
    "mod-log-likelihood": -1,
    "neyman": -2,
    "cressie-read": 2/3,
}
def _count(a, axis=None):
"""Count the number of non-masked elements of an array.
This function behaves like `np.ma.count`, but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def _m_broadcast_to(a, shape):
if np.ma.isMaskedArray(a):
return np.ma.masked_array(np.broadcast_to(a, shape),
mask=np.broadcast_to(a.mask, shape))
return np.broadcast_to(a, shape, subok=True)
# Result type of `power_divergence` (and `chisquare`): the test
# statistic and its p-value.
Power_divergenceResult = namedtuple('Power_divergenceResult',
                                    ('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
    """Cressie-Read power divergence statistic and goodness of fit test.

    This function tests the null hypothesis that the categorical data
    has the given frequencies, using the Cressie-Read power divergence
    statistic.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category.  By default the
        categories are assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": the p-value is computed using a
        chi-squared distribution with ``k - 1 - ddof`` degrees of
        freedom, where `k` is the number of observed frequencies.
        Default is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along
        which to apply the test.  If None, all values in `f_obs` are
        treated as a single data set.  Default is 0.
    lambda_ : float or str, optional
        The power in the Cressie-Read power divergence statistic
        (default 1).  Recognized names: ``"pearson"`` (1, equivalent to
        `stats.chisquare`), ``"log-likelihood"`` (0, the G-test [1]_),
        ``"freeman-tukey"`` (-1/2), ``"mod-log-likelihood"`` (-1),
        ``"neyman"`` (-2), and ``"cressie-read"`` (2/3, the power
        recommended in [2]_).

    Returns
    -------
    statistic : float or ndarray
        The Cressie-Read power divergence test statistic.  The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
    pvalue : float or ndarray
        The p-value of the test.  The value is a float if `ddof` and
        the return value `statistic` are scalars.

    See Also
    --------
    chisquare

    Notes
    -----
    This test is invalid when the observed or expected frequencies in
    each category are too small; a typical rule is that all should be at
    least 5.  The observed and expected sums must agree within a
    relative tolerance of ``1e-8`` or a ValueError is raised.  When
    ``lambda_ < 0`` the statistic divides by `f_obs`, and when
    ``lambda_ >= 0`` it divides by `f_exp`, so zeros there may produce
    warnings or errors.  Masked elements of `f_obs` or `f_exp` are
    ignored and do not count towards the size of the data set.

    .. versionadded:: 0.13.0

    References
    ----------
    .. [1] "G-test", https://en.wikipedia.org/wiki/G-test
    .. [2] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
           Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
           pp. 440-464.
    """
    # Resolve `lambda_` to a number: a recognized name, an explicit
    # value, or the default (Pearson, lambda_=1).
    if isinstance(lambda_, str):
        if lambda_ not in _power_div_lambda_names:
            names = repr(list(_power_div_lambda_names.keys()))[1:-1]
            raise ValueError("invalid string for lambda_: {0!r}. "
                             "Valid strings are {1}".format(lambda_, names))
        lambda_ = _power_div_lambda_names[lambda_]
    elif lambda_ is None:
        lambda_ = 1

    f_obs = np.asanyarray(f_obs)
    f_obs_float = f_obs.astype(np.float64)

    if f_exp is None:
        # Default expectation: uniform, i.e. the mean observed
        # frequency.  'invalid' is suppressed so a zero-length data set
        # is handled without spurious warnings.
        with np.errstate(invalid='ignore'):
            f_exp = f_obs.mean(axis=axis, keepdims=True)
    else:
        f_exp = np.asanyarray(f_exp)
        bshape = _broadcast_shapes(f_obs_float.shape, f_exp.shape)
        f_obs_float = _m_broadcast_to(f_obs_float, bshape)
        f_exp = _m_broadcast_to(f_exp, bshape)
        rtol = 1e-8  # to pass existing tests
        with np.errstate(invalid='ignore'):
            f_obs_sum = f_obs_float.sum(axis=axis)
            f_exp_sum = f_exp.sum(axis=axis)
            relative_diff = (np.abs(f_obs_sum - f_exp_sum) /
                             np.minimum(f_obs_sum, f_exp_sum))
            diff_gt_tol = (relative_diff > rtol).any()
        if diff_gt_tol:
            msg = (f"For each axis slice, the sum of the observed "
                   f"frequencies must agree with the sum of the "
                   f"expected frequencies to a relative tolerance "
                   f"of {rtol}, but the percent differences are:\n"
                   f"{relative_diff}")
            raise ValueError(msg)

    # Per-category terms, summed along `axis` to form the statistic.
    # A few lambda_ values have simpler, specialized forms.
    if lambda_ == 1:
        # Pearson's chi-squared statistic
        terms = (f_obs_float - f_exp)**2 / f_exp
    elif lambda_ == 0:
        # Log-likelihood ratio (i.e. G-test)
        terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
    elif lambda_ == -1:
        # Modified log-likelihood ratio
        terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
    else:
        # General Cressie-Read power divergence.
        terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
        terms /= 0.5 * lambda_ * (lambda_ + 1)

    stat = terms.sum(axis=axis)
    num_obs = _count(terms, axis=axis)
    ddof = asarray(ddof)
    p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
    return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
    """Calculate a one-way chi-square test.

    The chi-square test tests the null hypothesis that the categorical
    data has the given frequencies.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category.  By default the categories
        are assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value.  The p-value is computed using a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
        is the number of observed frequencies.  Default is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along
        which to apply the test.  If axis is None, all values in `f_obs`
        are treated as a single data set.  Default is 0.

    Returns
    -------
    chisq : float or ndarray
        The chi-squared test statistic.  The value is a float if `axis` is
        None or `f_obs` and `f_exp` are 1-D.
    p : float or ndarray
        The p-value of the test.  The value is a float if `ddof` and the
        return value `chisq` are scalars.

    See Also
    --------
    scipy.stats.power_divergence
    scipy.stats.fisher_exact : A more powerful alternative to the chisquare
                               test if any of the frequencies are less than 5.

    Notes
    -----
    This test is invalid when the observed or expected frequencies in each
    category are too small.  A typical rule is that all of the observed
    and expected frequencies should be at least 5.  If one or more
    frequencies are less than 5, Fisher's Exact Test can be used with
    greater statistical power.  According to [3]_, the total number of
    samples is recommended to be greater than 13, otherwise a table-based
    method of obtaining p-values is recommended.

    The sum of the observed and the sum of the expected frequencies must
    agree within a relative tolerance of ``1e-8`` for the test to be
    valid; `chisquare` raises an error otherwise.

    The default degrees of freedom, k-1, are for the case when no
    parameters of the distribution are estimated.  If p parameters are
    estimated by efficient maximum likelihood then the correct degrees of
    freedom are k-1-p.  If the parameters are estimated in a different
    way, then the dof can be between k-1-p and k-1.  It is also possible
    that the asymptotic distribution is not chi-square, in which case this
    test is not appropriate.

    References
    ----------
    .. [1] Lowry, Richard. "Concepts and Applications of Inferential
           Statistics". Chapter 8.
           https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html
    .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
    .. [3] Pearson, Karl. "On the criterion that a given system of deviations from the probable
           in the case of a correlated system of variables is such that it can be reasonably
           supposed to have arisen from random sampling", Philosophical Magazine. Series 5. 50
           (1900), pp. 157-175.

    Examples
    --------
    When just `f_obs` is given, it is assumed that the expected frequencies
    are uniform and given by the mean of the observed frequencies:

    >>> from scipy.stats import chisquare
    >>> chisquare([16, 18, 16, 14, 12, 12])
    (2.0, 0.84914503608460956)

    With `f_exp` the expected frequencies can be given:

    >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
    (3.5, 0.62338762774958223)

    `ddof` adjusts the default degrees of freedom, and both `ddof` and the
    frequency arrays broadcast:

    >>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
    (2.0, array([ 0.84914504,  0.73575888,  0.5724067 ]))
    """
    # Pearson's chi-squared statistic is the lambda_ = 1 ("pearson") member
    # of the Cressie-Read power divergence family, so delegate entirely.
    return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
                            lambda_="pearson")
# Named result type shared by the Kolmogorov-Smirnov test functions
# (ks_1samp, ks_2samp, kstest): (statistic, pvalue).
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def _compute_dplus(cdfvals):
"""Computes D+ as used in the Kolmogorov-Smirnov test.
Parameters
----------
cdfvals: array_like
Sorted array of CDF values between 0 and 1
Returns
-------
Maximum distance of the CDF values below Uniform(0, 1)
"""
n = len(cdfvals)
return (np.arange(1.0, n + 1) / n - cdfvals).max()
def _compute_dminus(cdfvals):
"""Computes D- as used in the Kolmogorov-Smirnov test.
Parameters
----------
cdfvals: array_like
Sorted array of CDF values between 0 and 1
Returns
-------
Maximum distance of the CDF values above Uniform(0, 1)
"""
n = len(cdfvals)
return (cdfvals - np.arange(0.0, n)/n).max()
def ks_1samp(x, cdf, args=(), alternative='two-sided', mode='auto'):
    """Perform the one-sample Kolmogorov-Smirnov test for goodness of fit.

    This test compares the underlying distribution F(x) of a sample
    against a given continuous distribution G(x).

    Parameters
    ----------
    x : array_like
        a 1-D array of observations of iid random variables.
    cdf : callable
        callable used to calculate the cdf.
    args : tuple, sequence, optional
        Distribution parameters, used with `cdf`.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the null and alternative hypotheses.  Default is
        'two-sided'.  'two-sided' tests identity F(x) = G(x) for all x;
        'less' tests the null F(x) >= G(x); 'greater' tests the null
        F(x) <= G(x).  Note that the hypotheses concern the *CDFs*, not
        the observed values.
    mode : {'auto', 'exact', 'approx', 'asymp'}, optional
        Defines the distribution used for calculating the p-value.
        The following options are available (default is 'auto'):

          * 'auto' : selects one of the other options.
          * 'exact' : uses the exact distribution of test statistic.
          * 'approx' : approximates the two-sided probability with twice
            the one-sided probability
          * 'asymp': uses asymptotic distribution of test statistic

    Returns
    -------
    statistic : float
        KS test statistic, either D, D+ or D- (depending on the value
        of 'alternative')
    pvalue : float
        One-tailed or two-tailed p-value.

    See Also
    --------
    ks_2samp, kstest

    Examples
    --------
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> stats.ks_1samp(stats.norm.rvs(size=100, random_state=rng),
    ...                stats.norm.cdf)
    KstestResult(statistic=0.165471391799..., pvalue=0.007331283245...)
    """
    # Accept abbreviated alternatives ('t...', 'g...', 'l...').
    shorthand = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}
    alternative = shorthand.get(alternative.lower()[0], alternative)
    if alternative not in ['two-sided', 'greater', 'less']:
        raise ValueError("Unexpected alternative %s" % alternative)

    # Drop masked entries before computing the sample size.
    if np.ma.is_masked(x):
        x = x.compressed()
    N = len(x)
    # The CDF must be evaluated on sorted observations for D+/D-.
    cdfvals = cdf(np.sort(x), *args)

    if alternative == 'greater':
        d_plus = _compute_dplus(cdfvals)
        return KstestResult(d_plus, distributions.ksone.sf(d_plus, N))
    if alternative == 'less':
        d_minus = _compute_dminus(cdfvals)
        return KstestResult(d_minus, distributions.ksone.sf(d_minus, N))

    # alternative == 'two-sided': statistic is the larger of D+ and D-.
    d = np.max([_compute_dplus(cdfvals), _compute_dminus(cdfvals)])
    if mode == 'auto':  # Always select exact
        mode = 'exact'
    if mode == 'exact':
        prob = distributions.kstwo.sf(d, N)
    elif mode == 'asymp':
        prob = distributions.kstwobign.sf(d * np.sqrt(N))
    else:
        # mode == 'approx': double the one-sided probability.
        prob = 2 * distributions.ksone.sf(d, N)
    # Guard against tiny numerical excursions outside [0, 1].
    prob = np.clip(prob, 0, 1)
    return KstestResult(d, prob)
# Backward-compatible alias: ks_2samp shares the KstestResult namedtuple.
Ks_2sampResult = KstestResult
def _compute_prob_inside_method(m, n, g, h):
    """
    Count the proportion of paths that stay strictly inside two diagonal lines.

    Parameters
    ----------
    m : integer
        m > 0
    n : integer
        n > 0
    g : integer
        g is greatest common divisor of m and n
    h : integer
        0 <= h <= lcm(m,n)

    Returns
    -------
    p : float
        The proportion of paths that stay inside the two lines.

    Count the integer lattice paths from (0, 0) to (m, n) which satisfy
    |x/m - y/n| < h / lcm(m, n).
    The paths make steps of size +1 in either positive x or positive y
    directions.

    We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
    Hodges, J.L. Jr.,
    "The Significance Probability of the Smirnov Two-Sample Test,"
    Arkiv fiur Matematik, 3, No. 43 (1958), 469-86.
    """
    # Probability is symmetrical in m, n.  Computation below uses m >= n.
    if m < n:
        m, n = n, m
    mg = m // g
    ng = n // g
    # Count the integer lattice paths from (0, 0) to (m, n) which satisfy
    # |nx/g - my/g| < h.
    # Compute matrix A such that:
    #  A(x, 0) = A(0, y) = 1
    #  A(x, y) = A(x, y-1) + A(x-1, y), for x,y>=1, except that
    #  A(x, y) = 0 if |x/m - y/n|>= h
    # Probability is A(m, n)/binom(m+n, n)
    # Optimizations exist for m==n, m==n*p.
    # Only need to preserve a single column of A, and only a
    # sliding window of it.
    # minj keeps track of the slide (the index offset of the window).
    minj, maxj = 0, min(int(np.ceil(h / mg)), n + 1)
    curlen = maxj - minj
    # Make a vector long enough to hold maximum window needed.
    lenA = min(2 * maxj + 2, n + 1)
    # This is an integer calculation, but the entries are essentially
    # binomial coefficients, hence grow quickly.
    # Scaling after each column is computed avoids dividing by a
    # large binomial coefficient at the end, but is not sufficient to avoid
    # the large dynamic range which appears during the calculation.
    # Instead we rescale based on the magnitude of the right most term in
    # the column and keep track of an exponent separately and apply
    # it at the end of the calculation.  Similarly when multiplying by
    # the binomial coefficient.
    dtype = np.float64
    A = np.zeros(lenA, dtype=dtype)
    # Initialize the first column
    A[minj:maxj] = 1
    expnt = 0  # accumulated base-2 exponent factored out of A
    for i in range(1, m + 1):
        # Generate the next column.
        # First calculate the sliding window
        lastminj, lastlen = minj, curlen
        minj = max(int(np.floor((ng * i - h) / mg)) + 1, 0)
        minj = min(minj, n)
        maxj = min(int(np.ceil((ng * i + h) / mg)), n + 1)
        if maxj <= minj:
            # Window collapsed: no path can stay inside the lines.
            return 0
        # Now fill in the values: prefix sums implement the recurrence
        # A(x, y) = A(x, y-1) + A(x-1, y) over the valid window.
        A[0:maxj - minj] = np.cumsum(A[minj - lastminj:maxj - lastminj])
        curlen = maxj - minj
        if lastlen > curlen:
            # Set some carried-over elements to 0
            A[maxj - minj:maxj - minj + (lastlen - curlen)] = 0
        # Rescale if the right most value is over 2**900
        val = A[maxj - minj - 1]
        _, valexpt = math.frexp(val)
        if valexpt > 900:
            # Scaling to bring down to about 2**800 appears
            # sufficient for sizes under 10000.
            valexpt -= 800
            A = np.ldexp(A, -valexpt)
            expnt += valexpt
    val = A[maxj - minj - 1]
    # Now divide by the binomial (m+n)!/m!/n!, term by term to avoid
    # overflow in the binomial coefficient itself.
    for i in range(1, n + 1):
        val = (val * i) / (m + i)
        _, valexpt = math.frexp(val)
        if valexpt < -128:
            val = np.ldexp(val, -valexpt)
            expnt += valexpt
    # Finally scale if needed: reapply the accumulated exponent.
    return np.ldexp(val, expnt)
def _compute_prob_outside_square(n, h):
"""
Compute the proportion of paths that pass outside the two diagonal lines.
Parameters
----------
n : integer
n > 0
h : integer
0 <= h <= n
Returns
-------
p : float
The proportion of paths that pass outside the lines x-y = +/-h.
"""
# Compute Pr(D_{n,n} >= h/n)
# Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2a) + binom(2n, n-3a) - ... )
# / binom(2n, n)
# This formulation exhibits subtractive cancellation.
# Instead divide each term by binom(2n, n), then factor common terms
# and use a Horner-like algorithm
# P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...)))))
P = 0.0
k = int(np.floor(n / h))
while k >= 0:
p1 = 1.0
# Each of the Ai terms has numerator and denominator with
# h simple terms.
for j in range(h):
p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)
P = p1 * (1.0 - P)
k -= 1
return 2 * P
def _count_paths_outside_method(m, n, g, h):
    """Count the number of paths that pass outside the specified diagonal.

    Parameters
    ----------
    m : integer
        m > 0
    n : integer
        n > 0
    g : integer
        g is greatest common divisor of m and n
    h : integer
        0 <= h <= lcm(m,n)

    Returns
    -------
    p : float
        The number of paths that go low.
        The calculation may overflow - check for a finite answer.

    Raises
    ------
    FloatingPointError: Raised if the intermediate computation goes outside
    the range of a float.

    Notes
    -----
    Count the integer lattice paths from (0, 0) to (m, n), which at some
    point (x, y) along the path, satisfy:
      m*y <= n*x - h*g
    The paths make steps of size +1 in either positive x or positive y
    directions.

    We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
    Hodges, J.L. Jr.,
    "The Significance Probability of the Smirnov Two-Sample Test,"
    Arkiv fiur Matematik, 3, No. 43 (1958), 469-86.
    """
    # Compute #paths which stay lower than x/m-y/n = h/lcm(m,n)
    # B(x, y) = #{paths from (0,0) to (x,y) without
    #             previously crossing the boundary}
    #         = binom(x, y) - #{paths which already reached the boundary}
    # Multiply by the number of path extensions going from (x, y) to (m, n)
    # Sum.

    # Probability is symmetrical in m, n.  Computation below assumes m >= n.
    if m < n:
        m, n = n, m
    mg = m // g
    ng = n // g

    # Not every x needs to be considered.
    # xj holds the list of x values to be checked.
    # Wherever n*x/m + ng*h crosses an integer
    # lxj is the number of such crossings to examine.
    lxj = n + (mg-h)//mg
    xj = [(h + mg * j + ng-1)//ng for j in range(lxj)]
    # B is an array just holding a few values of B(x,y), the ones needed.
    # B[j] == B(x_j, j)
    if lxj == 0:
        # No crossings: every path reaches the boundary, so the count is
        # the total number of lattice paths.
        return np.round(special.binom(m + n, n))
    B = np.zeros(lxj)
    B[0] = 1
    # Compute the B(x, y) terms.
    # The binomial coefficient is an integer, but special.binom()
    # may return a float.  Round it to the nearest integer.
    for j in range(1, lxj):
        Bj = np.round(special.binom(xj[j] + j, j))
        if not np.isfinite(Bj):
            raise FloatingPointError()
        # Subtract paths already counted at earlier boundary touches.
        # NOTE: `bin` below shadows the builtin; kept for byte-compatibility.
        for i in range(j):
            bin = np.round(special.binom(xj[j] - xj[i] + j - i, j-i))
            Bj -= bin * B[i]
        B[j] = Bj
        if not np.isfinite(Bj):
            raise FloatingPointError()
    # Compute the number of path extensions from each (x_j, j) to (m, n)
    # and accumulate the weighted total.
    num_paths = 0
    for j in range(lxj):
        bin = np.round(special.binom((m-xj[j]) + (n - j), n-j))
        term = B[j] * bin
        if not np.isfinite(term):
            raise FloatingPointError()
        num_paths += term
    return np.round(num_paths)
def _attempt_exact_2kssamp(n1, n2, g, d, alternative):
    """Attempts to compute the exact 2sample probability.

    n1, n2 are the sample sizes
    g is the gcd(n1, n2)
    d is the computed max difference in ECDFs

    Returns (success, d, probability)
    """
    lcm = (n1 // g) * n2
    # Snap d onto the lattice of attainable ECDF differences, h / lcm.
    h = int(np.round(d * lcm))
    d = h * 1.0 / lcm
    if h == 0:
        # Zero statistic: the p-value is exactly 1.
        return True, d, 1.0
    saw_fp_error = False
    prob = np.nan
    try:
        if alternative == 'two-sided':
            if n1 == n2:
                prob = _compute_prob_outside_square(n1, h)
            else:
                prob = 1 - _compute_prob_inside_method(n1, n2, g, h)
        elif n1 == n2:
            # One-sided, equal sizes: prob = binom(2n, n-h) / binom(2n, n).
            # Evaluating in that form incurs roundoff errors from
            # special.binom, so calculate the ratio directly as a product.
            js = np.arange(h)
            prob = np.prod((n1 - js) / (n1 + js + 1.0))
        else:
            num_paths = _count_paths_outside_method(n1, n2, g, h)
            bin = special.binom(n1 + n2, n1)
            if (not np.isfinite(bin) or not np.isfinite(num_paths)
                    or num_paths > bin):
                saw_fp_error = True
            else:
                prob = num_paths / bin
    except FloatingPointError:
        saw_fp_error = True

    if saw_fp_error:
        # Overflow in the combinatorics: signal the caller to fall back.
        return False, d, np.nan
    if not (0 <= prob <= 1):
        # Numerically inconsistent probability: also report failure.
        return False, d, prob
    return True, d, prob
def ks_2samp(data1, data2, alternative='two-sided', mode='auto'):
    """
    Performs the two-sample Kolmogorov-Smirnov test for goodness of fit.

    This test compares the underlying continuous distributions F(x) and G(x)
    of two independent samples.  See Notes for a description
    of the available null and alternative hypotheses.

    Parameters
    ----------
    data1, data2 : array_like, 1-Dimensional
        Two arrays of sample observations assumed to be drawn from a continuous
        distribution, sample sizes can be different.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the null and alternative hypotheses. Default is 'two-sided'.
        Please see explanations in the Notes below.
    mode : {'auto', 'exact', 'asymp'}, optional
        Defines the method used for calculating the p-value.
        The following options are available (default is 'auto'):

          * 'auto' : use 'exact' for small size arrays, 'asymp' for large
          * 'exact' : use exact distribution of test statistic
          * 'asymp' : use asymptotic distribution of test statistic

    Returns
    -------
    statistic : float
        KS statistic.
    pvalue : float
        One-tailed or two-tailed p-value.

    See Also
    --------
    kstest, ks_1samp, epps_singleton_2samp, anderson_ksamp

    Notes
    -----
    There are three options for the null and corresponding alternative
    hypothesis that can be selected using the `alternative` parameter.

    - `two-sided`: The null hypothesis is that the two distributions are
      identical, F(x)=G(x) for all x; the alternative is that they are not
      identical.
    - `less`: The null hypothesis is that F(x) >= G(x) for all x; the
      alternative is that F(x) < G(x) for at least one x.
    - `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
      alternative is that F(x) > G(x) for at least one x.

    Note that the alternative hypotheses describe the *CDFs* of the
    underlying distributions, not the observed values. For example,
    suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
    x1 tend to be less than those in x2.

    If the KS statistic is small or the p-value is high, then we cannot
    reject the null hypothesis in favor of the alternative.

    If the mode is 'auto', the computation is exact if the sample sizes are
    less than 10000.  For larger sizes, the computation uses the
    Kolmogorov-Smirnov distributions to compute an approximate value.

    The 'two-sided' 'exact' computation computes the complementary probability
    and then subtracts from 1.  As such, the minimum probability it can return
    is about 1e-16.  While the algorithm itself is exact, numerical
    errors may accumulate for large sample sizes.   It is most suited to
    situations in which one of the sample sizes is only a few thousand.

    We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.

    References
    ----------
    .. [1] Hodges, J.L. Jr.,  "The Significance Probability of the Smirnov
           Two-Sample Test," Arkiv fiur Matematik, 3, No. 43 (1958), 469-86.

    Examples
    --------
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> n1 = 200  # size of first sample
    >>> n2 = 300  # size of second sample

    For a different distribution, we can reject the null hypothesis since the
    pvalue is below 1%:

    >>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1, random_state=rng)
    >>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5, random_state=rng)
    >>> stats.ks_2samp(rvs1, rvs2)
    KstestResult(statistic=0.24833333333333332, pvalue=5.846586728086578e-07)

    For a slightly different distribution, we cannot reject the null hypothesis
    at a 10% or lower alpha since the p-value at 0.144 is higher than 10%

    >>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0, random_state=rng)
    >>> stats.ks_2samp(rvs1, rvs3)
    KstestResult(statistic=0.07833333333333334, pvalue=0.4379658456442945)

    For an identical distribution, we cannot reject the null hypothesis since
    the p-value is high, 41%:

    >>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0, random_state=rng)
    >>> stats.ks_2samp(rvs1, rvs4)
    KstestResult(statistic=0.12166666666666667, pvalue=0.05401863039081145)
    """
    if mode not in ['auto', 'exact', 'asymp']:
        raise ValueError(f'Invalid value for mode: {mode}')
    # Accept abbreviated alternatives ('t...', 'g...', 'l...').
    alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
        alternative.lower()[0], alternative)
    if alternative not in ['two-sided', 'less', 'greater']:
        raise ValueError(f'Invalid value for alternative: {alternative}')
    MAX_AUTO_N = 10000  # 'auto' will attempt to be exact if n1,n2 <= MAX_AUTO_N
    # Drop masked entries before computing sample sizes.
    if np.ma.is_masked(data1):
        data1 = data1.compressed()
    if np.ma.is_masked(data2):
        data2 = data2.compressed()
    data1 = np.sort(data1)
    data2 = np.sort(data2)
    n1 = data1.shape[0]
    n2 = data2.shape[0]
    if min(n1, n2) == 0:
        raise ValueError('Data passed to ks_2samp must not be empty')

    data_all = np.concatenate([data1, data2])
    # using searchsorted solves equal data problem
    cdf1 = np.searchsorted(data1, data_all, side='right') / n1
    cdf2 = np.searchsorted(data2, data_all, side='right') / n2
    cddiffs = cdf1 - cdf2
    # Ensure sign of minS is not negative.
    minS = np.clip(-np.min(cddiffs), 0, 1)
    maxS = np.max(cddiffs)
    # Pick the statistic according to the requested alternative.
    alt2Dvalue = {'less': minS, 'greater': maxS, 'two-sided': max(minS, maxS)}
    d = alt2Dvalue[alternative]
    g = gcd(n1, n2)
    n1g = n1 // g
    n2g = n2 // g
    prob = -np.inf
    original_mode = mode
    if mode == 'auto':
        mode = 'exact' if max(n1, n2) <= MAX_AUTO_N else 'asymp'
    elif mode == 'exact':
        # If lcm(n1, n2) is too big, switch from exact to asymp
        if n1g >= np.iinfo(np.int_).max / n2g:
            mode = 'asymp'
            warnings.warn(
                f"Exact ks_2samp calculation not possible with samples sizes "
                f"{n1} and {n2}. Switching to 'asymp'.", RuntimeWarning)

    if mode == 'exact':
        success, d, prob = _attempt_exact_2kssamp(n1, n2, g, d, alternative)
        if not success:
            # Exact computation overflowed or produced an invalid
            # probability: fall back to the asymptotic formula.
            mode = 'asymp'
            if original_mode == 'exact':
                warnings.warn(f"ks_2samp: Exact calculation unsuccessful. "
                              f"Switching to mode={mode}.", RuntimeWarning)

    if mode == 'asymp':
        # The product n1*n2 is large.  Use Smirnov's asymptotic formula.
        # Ensure float to avoid overflow in multiplication
        # sorted because the one-sided formula is not symmetric in n1, n2
        m, n = sorted([float(n1), float(n2)], reverse=True)
        en = m * n / (m + n)
        if alternative == 'two-sided':
            prob = distributions.kstwo.sf(d, np.round(en))
        else:
            z = np.sqrt(en) * d
            # Use Hodges' suggested approximation Eqn 5.3
            # Requires m to be the larger of (n1, n2)
            expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
            prob = np.exp(expt)

    # Guard against tiny numerical excursions outside [0, 1].
    prob = np.clip(prob, 0, 1)
    return KstestResult(d, prob)
def _parse_kstest_args(data1, data2, args, N):
# kstest allows many different variations of arguments.
# Pull out the parsing into a separate function
# (xvals, yvals, ) # 2sample
# (xvals, cdf function,..)
# (xvals, name of distribution, ...)
# (name of distribution, name of distribution, ...)
# Returns xvals, yvals, cdf
# where cdf is a cdf function, or None
# and yvals is either an array_like of values, or None
# and xvals is array_like.
rvsfunc, cdf = None, None
if isinstance(data1, str):
rvsfunc = getattr(distributions, data1).rvs
elif callable(data1):
rvsfunc = data1
if isinstance(data2, str):
cdf = getattr(distributions, data2).cdf
data2 = None
elif callable(data2):
cdf = data2
data2 = None
data1 = np.sort(rvsfunc(*args, size=N) if rvsfunc else data1)
return data1, data2, cdf
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='auto'):
    """Perform the (one-sample or two-sample) Kolmogorov-Smirnov test.

    The one-sample test compares the underlying distribution F(x) of a
    sample against a given distribution G(x).  The two-sample test
    compares the underlying distributions of two independent samples.
    Both tests are valid only for continuous distributions.

    Parameters
    ----------
    rvs : str, array_like, or callable
        If an array, it should be a 1-D array of observations of random
        variables.
        If a callable, it should be a function to generate random
        variables; it is required to have a keyword argument `size`.
        If a string, it should be the name of a distribution in
        `scipy.stats`, which will be used to generate random variables.
    cdf : str, array_like or callable
        If array_like, it should be a 1-D array of observations of random
        variables, and the two-sample test is performed (and rvs must be
        array_like).
        If a callable, that callable is used to calculate the cdf.
        If a string, it should be the name of a distribution in
        `scipy.stats`, which will be used as the cdf function.
    args : tuple, sequence, optional
        Distribution parameters, used if `rvs` or `cdf` are strings or
        callables.
    N : int, optional
        Sample size if `rvs` is string or callable.  Default is 20.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the null and alternative hypotheses.  Default is
        'two-sided'.  'two-sided' tests identity F(x) = G(x) for all x;
        'less' tests the null F(x) >= G(x); 'greater' tests the null
        F(x) <= G(x).  Note that the hypotheses concern the *CDFs*, not
        the observed values.
    mode : {'auto', 'exact', 'approx', 'asymp'}, optional
        Defines the distribution used for calculating the p-value.
        The following options are available (default is 'auto'):

          * 'auto' : selects one of the other options.
          * 'exact' : uses the exact distribution of test statistic.
          * 'approx' : approximates the two-sided probability with twice
            the one-sided probability
          * 'asymp': uses asymptotic distribution of test statistic

    Returns
    -------
    statistic : float
        KS test statistic, either D, D+ or D-.
    pvalue : float
        One-tailed or two-tailed p-value.

    See Also
    --------
    ks_2samp

    Examples
    --------
    >>> from scipy import stats
    >>> rng = np.random.default_rng()
    >>> x = np.linspace(-15, 15, 9)
    >>> stats.kstest(x, 'norm')
    KstestResult(statistic=0.444356027159..., pvalue=0.038850140086...)

    >>> stats.kstest(stats.norm.rvs(size=100, random_state=rng), stats.norm.cdf)
    KstestResult(statistic=0.165471391799..., pvalue=0.007331283245...)
    """
    # Historical spelling of the default alternative, kept for
    # compatibility with existing code.
    if alternative == 'two_sided':
        alternative = 'two-sided'
    if alternative not in ['two-sided', 'greater', 'less']:
        raise ValueError("Unexpected alternative %s" % alternative)
    # Resolve the flexible (rvs, cdf) argument forms.
    xvals, yvals, cdf = _parse_kstest_args(rvs, cdf, args, N)
    if cdf:
        # A cdf was supplied or named: one-sample test.
        return ks_1samp(xvals, cdf, args=args, alternative=alternative,
                        mode=mode)
    # Otherwise two data samples were given: two-sample test.
    return ks_2samp(xvals, yvals, alternative=alternative, mode=mode)
def tiecorrect(rankvals):
    """Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests.

    Parameters
    ----------
    rankvals : array_like
        A 1-D sequence of ranks.  Typically this will be the array
        returned by `~scipy.stats.rankdata`.

    Returns
    -------
    factor : float
        Correction factor for U or H.

    See Also
    --------
    rankdata : Assign ranks to the data
    mannwhitneyu : Mann-Whitney rank test
    kruskal : Kruskal-Wallis H test

    References
    ----------
    .. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
           Sciences.  New York: McGraw-Hill.

    Examples
    --------
    >>> from scipy.stats import tiecorrect, rankdata
    >>> tiecorrect([1, 2.5, 2.5, 4])
    0.9
    >>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
    >>> ranks
    array([ 1. ,  4. ,  2.5,  5.5,  7. ,  8. ,  2.5,  9. ,  5.5])
    >>> tiecorrect(ranks)
    0.9833333333333333
    """
    ordered = np.sort(rankvals)
    # Indices where a new run of equal values begins (plus the two ends),
    # so consecutive differences give the size of each tie group.
    run_starts = np.nonzero(np.r_[True, ordered[1:] != ordered[:-1], True])[0]
    tie_sizes = np.diff(run_starts).astype(np.float64)
    total = np.float64(ordered.size)
    if total < 2:
        # Fewer than two ranks: no correction applicable.
        return 1.0
    # 1 - sum(t^3 - t) / (n^3 - n), with t the size of each tie group.
    return 1.0 - (tie_sizes**3 - tie_sizes).sum() / (total**3 - total)
# Named result type for ranksums: the test statistic and its p-value.
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y, alternative='two-sided'):
"""Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': one of the distributions (underlying `x` or `y`) is
stochastically greater than the other.
* 'less': the distribution underlying `x` is stochastically less
than the distribution underlying `y`.
* 'greater': the distribution underlying `x` is stochastically greater
than the distribution underlying `y`.
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed.
pvalue : float
The p-value of the test.
References
----------
.. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
Examples
--------
We can test the hypothesis that two independent unequal-sized samples are
drawn from the same distribution with computing the Wilcoxon rank-sum
statistic.
>>> from scipy.stats import ranksums
>>> rng = np.random.default_rng()
>>> sample1 = rng.uniform(-1, 1, 200)
>>> sample2 = rng.uniform(-0.5, 1.5, 300) # a shifted distribution
>>> ranksums(sample1, sample2)
RanksumsResult(statistic=-7.887059, pvalue=3.09390448e-15) # may vary
>>> ranksums(sample1, sample2, alternative='less')
RanksumsResult(statistic=-7.750585297581713, pvalue=4.573497606342543e-15) # may vary
>>> ranksums(sample1, sample2, alternative='greater')
RanksumsResult(statistic=-7.750585297581713, pvalue=0.9999999999999954) # may vary
The p-value of less than ``0.05`` indicates that this test rejects the
hypothesis at the 5% significance level.
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
z, prob = _normtest_finish(z, alternative)
return RanksumsResult(z, prob)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))


def kruskal(*args, nan_policy='propagate'):
    """Compute the Kruskal-Wallis H-test for independent samples.

    The Kruskal-Wallis H-test tests the null hypothesis that the population
    median of all of the groups are equal.  It is a non-parametric version of
    ANOVA.  The test works on 2 or more independent samples, which may have
    different sizes.  Note that rejecting the null hypothesis does not
    indicate which of the groups differs.  Post hoc comparisons between
    groups are required to determine which groups are different.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        Two or more arrays with the sample measurements can be given as
        arguments. Samples must be one-dimensional.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    statistic : float
        The Kruskal-Wallis H statistic, corrected for ties.
    pvalue : float
        The p-value for the test using the assumption that H has a chi
        square distribution. The p-value returned is the survival function of
        the chi square distribution evaluated at H.

    Raises
    ------
    ValueError
        If fewer than two groups are given, if any sample is not
        one-dimensional, or if `nan_policy` is not one of the allowed
        strings.  Also raised when all values across the groups are
        identical (the tie correction is zero).

    See Also
    --------
    f_oneway : 1-way ANOVA.
    mannwhitneyu : Mann-Whitney rank test on two samples.
    friedmanchisquare : Friedman test for repeated measurements.

    Notes
    -----
    Due to the assumption that H has a chi square distribution, the number
    of samples in each group must not be too small.  A typical rule is
    that each sample must have at least 5 measurements.

    References
    ----------
    .. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
       One-Criterion Variance Analysis", Journal of the American Statistical
       Association, Vol. 47, Issue 260, pp. 583-621, 1952.
    .. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance

    Examples
    --------
    >>> from scipy import stats
    >>> x = [1, 3, 5, 7, 9]
    >>> y = [2, 4, 6, 8, 10]
    >>> stats.kruskal(x, y)
    KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)

    >>> x = [1, 1, 1]
    >>> y = [2, 2, 2]
    >>> z = [2, 2]
    >>> stats.kruskal(x, y, z)
    KruskalResult(statistic=7.0, pvalue=0.0301973834223185)
    """
    args = list(map(np.asarray, args))
    num_groups = len(args)
    if num_groups < 2:
        raise ValueError("Need at least two groups in stats.kruskal()")

    for arg in args:
        if arg.size == 0:
            # An empty group makes the statistic undefined.
            return KruskalResult(np.nan, np.nan)
        elif arg.ndim != 1:
            raise ValueError("Samples must be one-dimensional.")

    n = np.asarray(list(map(len, args)))

    if nan_policy not in ('propagate', 'raise', 'omit'):
        raise ValueError("nan_policy must be 'propagate', 'raise' or 'omit'")

    # Scan the groups for NaNs; _contains_nan also enforces 'raise'.
    contains_nan = False
    for arg in args:
        cn = _contains_nan(arg, nan_policy)
        if cn[0]:
            contains_nan = True
            break

    if contains_nan and nan_policy == 'omit':
        # BUG FIX: the previous code rebound the loop variable
        # (``for a in args: a = ma.masked_invalid(a)``), which had no
        # effect, so unmasked arrays were handed to the masked-stats
        # implementation.  Build a genuinely masked list instead so the
        # NaNs are actually omitted.
        args = [ma.masked_invalid(a) for a in args]
        return mstats_basic.kruskal(*args)

    if contains_nan and nan_policy == 'propagate':
        return KruskalResult(np.nan, np.nan)

    # Rank the pooled data; ties get the average-rank treatment.
    alldata = np.concatenate(args)
    ranked = rankdata(alldata)
    ties = tiecorrect(ranked)
    if ties == 0:
        raise ValueError('All numbers are identical in kruskal')

    # Compute sum^2/n for each group and sum.  `j` holds the slice
    # boundaries of each group within the pooled rank array.
    j = np.insert(np.cumsum(n), 0, 0)
    ssbn = 0
    for i in range(num_groups):
        ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i]

    totaln = np.sum(n, dtype=float)
    h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
    df = num_groups - 1
    # Correct the statistic for ties.
    h /= ties

    return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
                                     ('statistic', 'pvalue'))


def friedmanchisquare(*args):
    """Compute the Friedman test for repeated measurements.

    Tests the null hypothesis that repeated measurements of the same
    individuals have the same distribution.  It is often used to test for
    consistency among measurements obtained in different ways, e.g. when
    two measurement techniques are applied to the same set of individuals.

    Parameters
    ----------
    measurements1, measurements2, measurements3... : array_like
        Arrays of measurements.  All of the arrays must have the same
        number of elements, and at least three arrays must be given.

    Returns
    -------
    statistic : float
        The test statistic, corrected for ties.
    pvalue : float
        The associated p-value assuming that the test statistic has a chi
        squared distribution with ``k - 1`` degrees of freedom.

    Notes
    -----
    Due to the assumption that the test statistic has a chi squared
    distribution, the p-value is only reliable for n > 10 and more than
    6 repeated measurements.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Friedman_test
    """
    k = len(args)
    if k < 3:
        raise ValueError('At least 3 sets of measurements must be given '
                         'for Friedman test, got {}.'.format(k))

    n = len(args[0])
    for sample in args[1:]:
        if len(sample) != n:
            raise ValueError('Unequal N in friedmanchisquare. Aborting.')

    # One row per individual, one column per measurement set; rank within
    # each individual's row.
    data = np.vstack(args).T.astype(float)
    for row in range(len(data)):
        data[row] = rankdata(data[row])

    # Accumulate the tie correction over all rows.
    tie_sum = 0
    for row in range(len(data)):
        _, repnum = find_repeats(array(data[row]))
        for t in repnum:
            tie_sum += t * (t * t - 1)
    correction = 1 - tie_sum / (k * (k * k - 1) * n)

    # Sum of squared column rank sums drives the chi-square statistic.
    ssbn = np.sum(data.sum(axis=0)**2)
    statistic = (12.0 / (k * n * (k + 1)) * ssbn - 3 * n * (k + 1)) / correction
    return FriedmanchisquareResult(statistic,
                                   distributions.chi2.sf(statistic, k - 1))
BrunnerMunzelResult = namedtuple('BrunnerMunzelResult',
                                 ('statistic', 'pvalue'))


def brunnermunzel(x, y, alternative="two-sided", distribution="t",
                  nan_policy='propagate'):
    """Compute the Brunner-Munzel test on samples x and y.

    The Brunner-Munzel test is a nonparametric test of the null hypothesis that
    when values are taken one by one from each group, the probabilities of
    getting large values in both groups are equal.
    Unlike the Wilcoxon-Mann-Whitney's U test, this does not require the
    assumption of equivariance of two groups. Note that this does not assume
    the distributions are same. This test works on two independent samples,
    which may have different sizes.

    Parameters
    ----------
    x, y : array_like
        Array of samples, should be one-dimensional.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis.
        The following options are available (default is 'two-sided'):

        * 'two-sided'
        * 'less': one-sided
        * 'greater': one-sided
    distribution : {'t', 'normal'}, optional
        Defines how to get the p-value.
        The following options are available (default is 't'):

        * 't': get the p-value by t-distribution
        * 'normal': get the p-value by standard normal distribution.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan.
        The following options are available (default is 'propagate'):

        * 'propagate': returns nan
        * 'raise': throws an error
        * 'omit': performs the calculations ignoring nan values

    Returns
    -------
    statistic : float
        The Brunner-Munzer W statistic.
    pvalue : float
        p-value assuming an t distribution. One-sided or
        two-sided, depending on the choice of `alternative` and `distribution`.

    See Also
    --------
    mannwhitneyu : Mann-Whitney rank test on two samples.

    Notes
    -----
    Brunner and Munzel recommended to estimate the p-value by t-distribution
    when the size of data is 50 or less. If the size is lower than 10, it would
    be better to use permuted Brunner Munzel test (see [2]_).

    References
    ----------
    .. [1] Brunner, E. and Munzel, U. "The nonparametric Benhrens-Fisher
           problem: Asymptotic theory and a small-sample approximation".
           Biometrical Journal. Vol. 42(2000): 17-25.
    .. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the
           non-parametric Behrens-Fisher problem". Computational Statistics and
           Data Analysis. Vol. 51(2007): 5192-5204.

    Examples
    --------
    >>> from scipy import stats
    >>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1]
    >>> x2 = [3,3,4,3,1,2,3,1,1,5,4]
    >>> w, p_value = stats.brunnermunzel(x1, x2)
    >>> w
    3.1374674823029505
    >>> p_value
    0.0057862086661515377

    """
    x = np.asarray(x)
    y = np.asarray(y)

    # check both x and y for NaNs; each check also validates nan_policy
    cnx, npx = _contains_nan(x, nan_policy)
    cny, npy = _contains_nan(y, nan_policy)
    contains_nan = cnx or cny
    # if either sample requests 'omit', apply it to the whole test
    if npx == "omit" or npy == "omit":
        nan_policy = "omit"

    if contains_nan and nan_policy == "propagate":
        return BrunnerMunzelResult(np.nan, np.nan)
    elif contains_nan and nan_policy == "omit":
        # delegate to the masked-array implementation so NaNs are ignored
        x = ma.masked_invalid(x)
        y = ma.masked_invalid(y)
        return mstats_basic.brunnermunzel(x, y, alternative, distribution)

    nx = len(x)
    ny = len(y)
    if nx == 0 or ny == 0:
        # the statistic is undefined for an empty sample
        return BrunnerMunzelResult(np.nan, np.nan)
    # ranks within the pooled sample, then split back per group
    rankc = rankdata(np.concatenate((x, y)))
    rankcx = rankc[0:nx]
    rankcy = rankc[nx:nx+ny]
    rankcx_mean = np.mean(rankcx)
    rankcy_mean = np.mean(rankcy)
    # within-group ranks, needed for the variance estimates below
    rankx = rankdata(x)
    ranky = rankdata(y)
    rankx_mean = np.mean(rankx)
    ranky_mean = np.mean(ranky)

    # Sx, Sy: sample variances of (pooled rank - within-group rank),
    # the variance estimates used by Brunner & Munzel ([1]) -- they do
    # not assume equal variances in the two groups.
    Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
    Sx /= nx - 1
    Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
    Sy /= ny - 1

    # W statistic: standardized difference of mean pooled ranks
    wbfn = nx * ny * (rankcy_mean - rankcx_mean)
    wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)

    if distribution == "t":
        # Satterthwaite-style approximation of the degrees of freedom
        df_numer = np.power(nx * Sx + ny * Sy, 2.0)
        df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
        df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
        df = df_numer / df_denom
        p = distributions.t.cdf(wbfn, df)
    elif distribution == "normal":
        p = distributions.norm.cdf(wbfn)
    else:
        raise ValueError(
            "distribution should be 't' or 'normal'")

    # convert the lower-tail probability to the requested alternative
    if alternative == "greater":
        pass
    elif alternative == "less":
        p = 1 - p
    elif alternative == "two-sided":
        p = 2 * np.min([p, 1-p])
    else:
        raise ValueError(
            "alternative should be 'less', 'greater' or 'two-sided'")

    return BrunnerMunzelResult(wbfn, p)
def combine_pvalues(pvalues, method='fisher', weights=None):
    """
    Combine p-values from independent tests bearing upon the same hypothesis.

    Parameters
    ----------
    pvalues : array_like, 1-D
        Array of p-values assumed to come from independent tests.
    method : {'fisher', 'pearson', 'tippett', 'stouffer',
              'mudholkar_george'}, optional

        Name of method to use to combine p-values.
        The following methods are available (default is 'fisher'):

        * 'fisher': Fisher's method (Fisher's combined probability test), the
          sum of the logarithm of the p-values
        * 'pearson': Pearson's method (similar to Fisher's but uses sum of the
          complement of the p-values inside the logarithms)
        * 'tippett': Tippett's method (minimum of p-values)
        * 'stouffer': Stouffer's Z-score method
        * 'mudholkar_george': the difference of Fisher's and Pearson's methods
          divided by 2
    weights : array_like, 1-D, optional
        Optional array of weights used only for Stouffer's Z-score method.

    Returns
    -------
    statistic: float
        The statistic calculated by the specified method.
    pval: float
        The combined p-value.

    Raises
    ------
    ValueError
        If `pvalues` (or `weights`, for Stouffer's method) is not 1-D, if
        `weights` and `pvalues` differ in length, or if `method` is not one
        of the recognized names.

    Notes
    -----
    Fisher's method (also known as Fisher's combined probability test) [1]_ uses
    a chi-squared statistic to compute a combined p-value. The closely related
    Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
    advantage of Stouffer's method is that it is straightforward to introduce
    weights, which can make Stouffer's method more powerful than Fisher's
    method when the p-values are from studies of different size [6]_ [7]_.
    The Pearson's method uses :math:`log(1-p_i)` inside the sum whereas Fisher's
    method uses :math:`log(p_i)` [4]_. For Fisher's and Pearson's method, the
    sum of the logarithms is multiplied by -2 in the implementation. This
    quantity has a chi-square distribution that determines the p-value. The
    `mudholkar_george` method is the difference of the Fisher's and Pearson's
    test statistics, each of which include the -2 factor [4]_. However, the
    `mudholkar_george` method does not include these -2 factors. The test
    statistic of `mudholkar_george` is the sum of logisitic random variables and
    equation 3.6 in [3]_ is used to approximate the p-value based on Student's
    t-distribution.

    Fisher's method may be extended to combine p-values from dependent tests
    [5]_. Extensions such as Brown's method and Kost's method are not currently
    implemented.

    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
    .. [2] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
    .. [3] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
           random variables." Metrika 30.1 (1983): 1-13.
    .. [4] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
           combining p-values." Biometrika 105.1 (2018): 239-246.
    .. [5] Whitlock, M. C. "Combining probability from independent tests: the
           weighted Z-method is superior to Fisher's approach." Journal of
           Evolutionary Biology 18, no. 5 (2005): 1368-1373.
    .. [6] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
           for combining probabilities in meta-analysis." Journal of
           Evolutionary Biology 24, no. 8 (2011): 1836-1841.
    .. [7] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method

    """
    pvalues = np.asarray(pvalues)
    if pvalues.ndim != 1:
        raise ValueError("pvalues is not 1-D")

    if method == 'fisher':
        statistic = -2 * np.sum(np.log(pvalues))
        pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
    elif method == 'pearson':
        statistic = -2 * np.sum(np.log1p(-pvalues))
        pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
    elif method == 'mudholkar_george':
        normalizing_factor = np.sqrt(3/len(pvalues))/np.pi
        statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
        nu = 5 * len(pvalues) + 4
        approx_factor = np.sqrt(nu / (nu - 2))
        # eq. 3.6 of George & Mudholkar [3]: t-approximation of the sum
        # of logistic random variables
        pval = distributions.t.sf(statistic * normalizing_factor
                                  * approx_factor, nu)
    elif method == 'tippett':
        # min of n uniform p-values is Beta(1, n) distributed
        statistic = np.min(pvalues)
        pval = distributions.beta.sf(statistic, 1, len(pvalues))
    elif method == 'stouffer':
        if weights is None:
            weights = np.ones_like(pvalues)
        elif len(weights) != len(pvalues):
            raise ValueError("pvalues and weights must be of the same size.")

        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("weights is not 1-D")

        Zi = distributions.norm.isf(pvalues)
        statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
        pval = distributions.norm.sf(statistic)
    else:
        # BUG FIX: the previous message passed `method` as a second
        # exception argument (the '%s' was never interpolated) and
        # contained a stray quote before 'or'.
        raise ValueError(
            "Invalid method %r. Options are 'fisher', 'pearson', "
            "'mudholkar_george', 'tippett', or 'stouffer'." % (method,))

    return (statistic, pval)
#####################################
# STATISTICAL DISTANCES #
#####################################
def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
    r"""
    Compute the first Wasserstein distance between two 1D distributions.

    This distance is also known as the earth mover's distance, since it can be
    seen as the minimum amount of "work" required to transform :math:`u` into
    :math:`v`, where "work" is measured as the amount of distribution weight
    that must be moved, multiplied by the distance it has to be moved.

    .. versionadded:: 1.0.0

    Parameters
    ----------
    u_values, v_values : array_like
        Values observed in the (empirical) distribution.
    u_weights, v_weights : array_like, optional
        Weight for each value. If unspecified, each value is assigned the same
        weight.
        `u_weights` (resp. `v_weights`) must have the same length as
        `u_values` (resp. `v_values`). If the weight sum differs from 1, it
        must still be positive and finite so that the weights can be normalized
        to sum to 1.

    Returns
    -------
    distance : float
        The computed distance between the distributions.

    Notes
    -----
    The first Wasserstein distance between the distributions :math:`u` and
    :math:`v` is:

    .. math::

        l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times
        \mathbb{R}} |x-y| \mathrm{d} \pi (x, y)

    where :math:`\Gamma (u, v)` is the set of (probability) distributions on
    :math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and
    :math:`v` on the first and second factors respectively.

    If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and
    :math:`v`, this distance also equals to:

    .. math::

        l_1(u, v) = \int_{-\infty}^{+\infty} |U-V|

    See [2]_ for a proof of the equivalence of both definitions.

    The input distributions can be empirical, therefore coming from samples
    whose values are effectively inputs of the function, or they can be seen as
    generalized functions, in which case they are weighted sums of Dirac delta
    functions located at the specified values.

    References
    ----------
    .. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric
    .. [2] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related
           Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`.

    Examples
    --------
    >>> from scipy.stats import wasserstein_distance
    >>> wasserstein_distance([0, 1, 3], [5, 6, 8])
    5.0
    >>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])
    0.25
    >>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4],
    ...                      [1.4, 0.9, 3.1, 7.2], [3.2, 3.5])
    4.0781331438047861

    """
    # Order p=1 of the generic CDF distance is exactly the 1-Wasserstein
    # (earth mover's) distance; all the real work happens in _cdf_distance.
    return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
def energy_distance(u_values, v_values, u_weights=None, v_weights=None):
    r"""Compute the energy distance between two 1D distributions.

    .. versionadded:: 1.0.0

    Parameters
    ----------
    u_values, v_values : array_like
        Values observed in the (empirical) distribution.
    u_weights, v_weights : array_like, optional
        Weight for each value. If unspecified, each value is assigned the same
        weight.
        `u_weights` (resp. `v_weights`) must have the same length as
        `u_values` (resp. `v_values`). If the weight sum differs from 1, it
        must still be positive and finite so that the weights can be normalized
        to sum to 1.

    Returns
    -------
    distance : float
        The computed distance between the distributions.

    Notes
    -----
    The energy distance between two distributions :math:`u` and :math:`v`, whose
    respective CDFs are :math:`U` and :math:`V`, equals to:

    .. math::

        D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| -
        \mathbb E|Y - Y'| \right)^{1/2}

    where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are
    independent random variables whose probability distribution is :math:`u`
    (resp. :math:`v`).

    As shown in [2]_, for one-dimensional real-valued variables, the energy
    distance is linked to the non-distribution-free version of the Cramér-von
    Mises distance:

    .. math::

        D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2
        \right)^{1/2}

    Note that the common Cramér-von Mises criterion uses the distribution-free
    version of the distance. See [2]_ (section 2), for more details about both
    versions of the distance.

    The input distributions can be empirical, therefore coming from samples
    whose values are effectively inputs of the function, or they can be seen as
    generalized functions, in which case they are weighted sums of Dirac delta
    functions located at the specified values.

    References
    ----------
    .. [1] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance
    .. [2] Szekely "E-statistics: The energy of statistical samples." Bowling
           Green State University, Department of Mathematics and Statistics,
           Technical Report 02-16 (2002).
    .. [3] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews:
           Computational Statistics, 8(1):27-38 (2015).
    .. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
           Munos "The Cramer Distance as a Solution to Biased Wasserstein
           Gradients" (2017). :arXiv:`1705.10743`.

    Examples
    --------
    >>> from scipy.stats import energy_distance
    >>> energy_distance([0], [2])
    2.0000000000000004
    >>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])
    1.0000000000000002
    >>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ],
    ...                 [2.1, 4.2, 7.4, 8. ], [7.6, 8.8])
    0.88003340976158217

    """
    # Per [2]_, the 1-D energy distance is sqrt(2) times the order-2 CDF
    # distance (the non-distribution-free Cramér-von Mises distance).
    return np.sqrt(2) * _cdf_distance(2, u_values, v_values,
                                      u_weights, v_weights)
def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
    r"""
    Compute, between two one-dimensional distributions :math:`u` and
    :math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the
    statistical distance

    .. math::

        l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}

    where ``p`` is a positive parameter; ``p = 1`` gives the Wasserstein
    distance and ``p = 2`` the energy distance.

    Parameters
    ----------
    u_values, v_values : array_like
        Values observed in the (empirical) distribution.
    u_weights, v_weights : array_like, optional
        Weight for each value.  If unspecified, each value is assigned the
        same weight.  Must match the length of the corresponding values
        array; a weight sum different from 1 is normalized away as long as
        it is positive and finite.

    Returns
    -------
    distance : float
        The computed distance between the distributions.

    References
    ----------
    .. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
           Munos "The Cramer Distance as a Solution to Biased Wasserstein
           Gradients" (2017). :arXiv:`1705.10743`.
    """
    u_values, u_weights = _validate_distribution(u_values, u_weights)
    v_values, v_weights = _validate_distribution(v_values, v_weights)

    u_order = np.argsort(u_values)
    v_order = np.argsort(v_values)

    # Pool and sort every observed value; the integral is piecewise constant
    # between consecutive pooled values.
    pooled = np.concatenate((u_values, v_values))
    pooled.sort(kind='mergesort')
    # Width of each interval between successive pooled values.
    gaps = np.diff(pooled)

    # For each interval, how many u (resp. v) values lie at or below it.
    u_idx = u_values[u_order].searchsorted(pooled[:-1], 'right')
    v_idx = v_values[v_order].searchsorted(pooled[:-1], 'right')

    # Empirical CDFs on each interval, weighted if weights were given.
    if u_weights is None:
        u_cdf = u_idx / u_values.size
    else:
        cum_u = np.concatenate(([0], np.cumsum(u_weights[u_order])))
        u_cdf = cum_u[u_idx] / cum_u[-1]

    if v_weights is None:
        v_cdf = v_idx / v_values.size
    else:
        cum_v = np.concatenate(([0], np.cumsum(v_weights[v_order])))
        v_cdf = cum_v[v_idx] / cum_v[-1]

    # Integrate |U - V|^p over the intervals.  p = 1 and p = 2 avoid
    # np.power, which carries a measurable overhead.
    cdf_diff = np.abs(u_cdf - v_cdf)
    if p == 1:
        return np.sum(cdf_diff * gaps)
    if p == 2:
        return np.sqrt(np.sum(np.square(cdf_diff) * gaps))
    return np.power(np.sum(np.power(cdf_diff, p) * gaps), 1/p)
def _validate_distribution(values, weights):
"""
Validate the values and weights from a distribution input of `cdf_distance`
and return them as ndarray objects.
Parameters
----------
values : array_like
Values observed in the (empirical) distribution.
weights : array_like
Weight for each value.
Returns
-------
values : ndarray
Values as ndarray.
weights : ndarray
Weights as ndarray.
"""
# Validate the value array.
values = np.asarray(values, dtype=float)
if len(values) == 0:
raise ValueError("Distribution can't be empty.")
# Validate the weight array, if specified.
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(weights) != len(values):
raise ValueError('Value and weight array-likes for the same '
'empirical distribution must be of the same size.')
if np.any(weights < 0):
raise ValueError('All weights must be non-negative.')
if not 0 < np.sum(weights) < np.inf:
raise ValueError('Weight array-like sum must be positive and '
'finite. Set as None for an equal distribution of '
'weight.')
return values, weights
return values, None
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))


def find_repeats(arr):
    """Find repeats and repeat counts.

    Parameters
    ----------
    arr : array_like
        Input array. This is cast to float64.

    Returns
    -------
    values : ndarray
        The unique values from the (flattened) input that are repeated.

    counts : ndarray
        Number of times the corresponding 'value' is repeated.

    Notes
    -----
    In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
    difference is that `find_repeats` only returns repeated values.

    Examples
    --------
    >>> from scipy import stats
    >>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
    RepeatedResults(values=array([2.]), counts=array([4]))

    >>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
    RepeatedResults(values=array([4., 5.]), counts=array([2, 2]))

    """
    # Note: always copies -- np.array(..., dtype=np.float64) allocates a new
    # array, so the caller's data is never modified by _find_repeats.
    return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
def _sum_of_squares(a, axis=0):
    """Square each element of the input array and return the sum(s).

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    sum_of_squares : ndarray
        The sum along the given axis of ``a**2``.

    See Also
    --------
    _square_of_sums : The square(s) of the sum(s) (the opposite of
        `_sum_of_squares`).
    """
    a, axis = _chk_asarray(a, axis)
    squared = a * a
    return np.sum(squared, axis)
def _square_of_sums(a, axis=0):
    """Sum the elements of the input array and return the square of that sum.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    square_of_sums : float or ndarray
        The square of the sum over `axis`.

    See Also
    --------
    _sum_of_squares : The sum of squares (the opposite of `_square_of_sums`).
    """
    a, axis = _chk_asarray(a, axis)
    total = np.sum(a, axis)
    # A scalar sum squares to a plain float; an array sum is squared
    # elementwise after promotion to float.
    if np.isscalar(total):
        return float(total) * total
    return total.astype(float) * total
def rankdata(a, method='average', *, axis=None):
    """Assign ranks to data, dealing with ties appropriately.

    By default (``axis=None``), the data array is first flattened, and a flat
    array of ranks is returned. Separately reshape the rank array to the
    shape of the data array if desired (see Examples).

    Ranks begin at 1.  The `method` argument controls how ranks are assigned
    to equal values.  See [1]_ for further discussion of ranking methods.

    Parameters
    ----------
    a : array_like
        The array of values to be ranked.
    method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional
        The method used to assign ranks to tied elements.
        The following methods are available (default is 'average'):

        * 'average': The average of the ranks that would have been assigned
          to all the tied values is assigned to each value.
        * 'min': The minimum of the ranks that would have been assigned to
          all the tied values is assigned to each value ("competition"
          ranking).
        * 'max': The maximum of the ranks that would have been assigned to
          all the tied values is assigned to each value.
        * 'dense': Like 'min', but the rank of the next highest element is
          assigned the rank immediately after those assigned to the tied
          elements.
        * 'ordinal': All values are given a distinct rank, corresponding to
          the order that the values occur in `a`.
    axis : {None, int}, optional
        Axis along which to perform the ranking. If ``None``, the data array
        is first flattened.

    Returns
    -------
    ranks : ndarray
        An array of size equal to the size of `a`, containing rank scores.

    References
    ----------
    .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking

    Examples
    --------
    >>> from scipy.stats import rankdata
    >>> rankdata([0, 2, 3, 2])
    array([ 1. ,  2.5,  4. ,  2.5])
    >>> rankdata([0, 2, 3, 2], method='min')
    array([ 1,  2,  4,  2])
    >>> rankdata([0, 2, 3, 2], method='max')
    array([ 1,  3,  4,  3])
    >>> rankdata([0, 2, 3, 2], method='dense')
    array([ 1,  2,  3,  2])
    >>> rankdata([0, 2, 3, 2], method='ordinal')
    array([ 1,  2,  4,  3])
    >>> rankdata([[0, 2, 2], [3, 2, 5]], axis=1)
    array([[1. , 2.5, 2.5],
           [2. , 1. , 3. ]])
    """
    if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
        raise ValueError('unknown method "{0}"'.format(method))

    if axis is not None:
        a = np.asarray(a)
        if a.size == 0:
            # The return value of `normalize_axis_index` is ignored; the
            # call only validates `axis` before we return an empty result.
            # use scipy._lib._util._normalize_axis_index when available
            np.core.multiarray.normalize_axis_index(axis, a.ndim)
            result_dtype = np.float64 if method == 'average' else np.int_
            return np.empty(a.shape, dtype=result_dtype)
        # Rank each 1-D slice independently via a recursive call.
        return np.apply_along_axis(rankdata, axis, a, method)

    flat = np.ravel(np.asarray(a))
    # 'ordinal' needs a stable sort so equal values keep input order.
    sort_kind = 'mergesort' if method == 'ordinal' else 'quicksort'
    order = np.argsort(flat, kind=sort_kind)
    # `inverse` maps each element to its 0-based position in sorted order.
    inverse = np.empty(order.size, dtype=np.intp)
    inverse[order] = np.arange(order.size, dtype=np.intp)

    if method == 'ordinal':
        return inverse + 1

    sorted_flat = flat[order]
    # True at each position where a new (distinct) value starts.
    new_value = np.r_[True, sorted_flat[1:] != sorted_flat[:-1]]
    # Dense rank: cumulative count of distinct values, mapped back.
    dense = new_value.cumsum()[inverse]
    if method == 'dense':
        return dense

    # Cumulative counts at the end of each tie group.
    group_ends = np.r_[np.nonzero(new_value)[0], len(new_value)]
    if method == 'max':
        return group_ends[dense]
    if method == 'min':
        return group_ends[dense - 1] + 1
    # 'average': midpoint of the min and max ranks of each tie group.
    return .5 * (group_ends[dense] + group_ends[dense - 1] + 1)
|
bsd-3-clause
|
tosolveit/scikit-learn
|
examples/applications/face_recognition.py
|
191
|
5513
|
"""
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
precision recall f1-score support
Ariel Sharon 0.67 0.92 0.77 13
Colin Powell 0.75 0.78 0.76 60
Donald Rumsfeld 0.78 0.67 0.72 27
George W Bush 0.86 0.86 0.86 146
Gerhard Schroeder 0.76 0.76 0.76 25
Hugo Chavez 0.67 0.67 0.67 15
Tony Blair 0.81 0.69 0.75 36
avg / total 0.80 0.80 0.80 322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
# NOTE(review): fetch_lfw_people downloads ~233MB on first use (see LFW URL in
# the module docstring) and caches it locally.
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the flattened pixel data directly (relative
# pixel position information is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set (25% held out for evaluation)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
# NOTE(review): this file imports sklearn.cross_validation, sklearn.grid_search
# and RandomizedPCA, which were removed in modern scikit-learn; it only runs
# against the old sklearn versions those modules existed in.
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
      % (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model; grid-search over C and gamma for the
# RBF kernel on the PCA-projected training data.
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
              'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
    """Plot a gallery of h-by-w portraits in an n_row x n_col grid."""
    plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    n_cells = n_row * n_col
    for cell in range(n_cells):
        plt.subplot(n_row, n_col, cell + 1)
        plt.imshow(images[cell].reshape((h, w)), cmap=plt.cm.gray)
        plt.title(titles[cell], size=12)
        # hide tick marks: they carry no information for portraits
        plt.xticks(())
        plt.yticks(())
# build the per-portrait label for the prediction gallery on the test set
def title(y_pred, y_test, target_names, i):
    """Return a two-line 'predicted vs true' label (surnames only) for sample i."""
    predicted = target_names[y_pred[i]].rsplit(' ', 1)[-1]
    actual = target_names[y_test[i]].rsplit(' ', 1)[-1]
    return 'predicted: %s\ntrue: %s' % (predicted, actual)
# Label each test portrait with its predicted and true name
prediction_titles = [title(y_pred, y_test, target_names, i)
                     for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
|
bsd-3-clause
|
tosh1ki/pyogi
|
doc/sample_code/search_forking_pro.py
|
1
|
2029
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Search professional shogi game records for king-rook fork
(oute-bisha-dori) positions, to check whether the saying "in professional
games, the player who plays the king-rook fork loses" is actually true.

Download 2chkifu.zip, extract it to a suitable directory, then run like:
`python3 search_forking_pro.py -p [path to 2chkifu]`
'''
import os
import argparse
import pandas as pd
from pyogi.ki2converter import *
from pyogi.kifu import *
from get_ki2_list import get_ki2_list
if __name__ == '__main__':
    # Collect the .ki2 record paths from the command-line arguments.
    path_ki2_list = get_ki2_list(argparse.ArgumentParser())
    res_table = []
    for path_ki2 in path_ki2_list:
        # Convert the KI2 record to CSA format; skip records that fail.
        ki2converter = Ki2converter()
        ki2converter.from_path(path_ki2)
        csa = ki2converter.to_csa()
        if not csa:
            continue
        kifu = Kifu()
        kifu.from_csa(csa)
        if not kifu.extracted:
            continue
        # Look for forks attacking the king (OU) and rook (HI) at once.
        res = kifu.get_forking(['OU', 'HI'], display=True)
        # if res[2] or res[3]:
        #     print(kifu.players)
        # Data
        # fork: sente forked | gote forked
        # forkandwin: (sente won & sente forked) | (gote won & gote forked)
        res_table.append(
            {
                'path': path_ki2,
                'player0': kifu.players[0],
                'player1': kifu.players[1],
                'sente_won': kifu.sente_win,
                'sennichite': kifu.is_sennichite,
                'sente_forking': res[2] != [],
                'gote_forking': res[3] != [],
                'teai': kifu.teai,
                'fork': res[2] != [] or res[3] != [],
                'forkandwin': ((kifu.sente_win and res[2]!=[]) or
                               (not kifu.sente_win and res[3]!=[]))
            }
        )
        # Progress indicator every 10000 processed records.
        if len(res_table) % 10000 == 0:
            print(len(res_table), path_ki2)
    # Output: cross-tabulate fork occurrence against "forker won".
    df = pd.DataFrame(res_table)
    print(pd.crosstab(df.loc[:, 'fork'], df.loc[:, 'forkandwin']))
|
mit
|
mugizico/scikit-learn
|
sklearn/decomposition/tests/test_incremental_pca.py
|
297
|
8265
|
"""Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
    # Incremental PCA on dense arrays.
    X = iris.data
    batch_size = X.shape[0] // 3
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    pca = PCA(n_components=2)
    pca.fit_transform(X)
    X_transformed = ipca.fit_transform(X)
    np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
    # Incremental and batch fits should explain roughly the same variance
    # (only checked to 1 decimal place).
    assert_almost_equal(ipca.explained_variance_ratio_.sum(),
                        pca.explained_variance_ratio_.sum(), 1)
    # For several component counts, covariance and precision returned by the
    # fitted model should be (pseudo-)inverses of each other.
    for n_components in [1, 2, X.shape[1]]:
        ipca = IncrementalPCA(n_components, batch_size=batch_size)
        ipca.fit(X)
        cov = ipca.get_covariance()
        precision = ipca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
    # Test that the projection of data is correct.
    rng = np.random.RandomState(1999)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
    # Get the reconstruction of the generated data X
    # Note that Xt has the same "components" as X, just separated
    # This is what we want to ensure is recreated correctly
    Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
    # Normalize to unit length so only the direction is compared
    Yt /= np.sqrt((Yt ** 2).sum())
    # Make sure that the first element of Yt is ~1, this means
    # the reconstruction worked as expected
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
    # Projecting and inverse-projecting nearly rank-2 data should recover it.
    rng = np.random.RandomState(1999)
    n_samples, n_dims = 50, 3
    X = rng.randn(n_samples, n_dims)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # Since the data is almost of rank n_components, the round trip
    # transform -> inverse_transform should reproduce the original signal.
    estimator = IncrementalPCA(n_components=2, batch_size=10).fit(X)
    reconstructed = estimator.inverse_transform(estimator.transform(X))
    assert_almost_equal(X, reconstructed, decimal=3)
def test_incremental_pca_validation():
    # n_components outside [1, n_features] must be rejected at fit time.
    X = [[0, 1], [1, 0]]
    invalid_components = [-1, 0, .99, 3]
    for bad in invalid_components:
        estimator = IncrementalPCA(bad, batch_size=10)
        assert_raises(ValueError, estimator.fit, X)
def test_incremental_pca_set_params():
    # Changing n_components via set_params between partial_fit calls must
    # raise, unless it is restored to the originally fitted value.
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 20
    X = rng.randn(n_samples, n_features)
    X2 = rng.randn(n_samples, n_features)
    X3 = rng.randn(n_samples, n_features)
    ipca = IncrementalPCA(n_components=20)
    ipca.fit(X)
    # Decreasing number of components
    ipca.set_params(n_components=10)
    assert_raises(ValueError, ipca.partial_fit, X2)
    # Increasing number of components
    ipca.set_params(n_components=15)
    assert_raises(ValueError, ipca.partial_fit, X3)
    # Returning to original setting
    ipca.set_params(n_components=20)
    ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # partial_fit with a different number of features than the fitted model
    # must raise a ValueError.
    rng = np.random.RandomState(1999)
    first_batch = rng.randn(100, 20)
    wider_batch = rng.randn(100, 50)
    model = IncrementalPCA(n_components=None)
    model.fit(first_batch)
    assert_raises(ValueError, model.partial_fit, wider_batch)
def test_incremental_pca_batch_signs():
    # The signs of components_ must agree across different batch sizes.
    rng = np.random.RandomState(1999)
    data = rng.randn(100, 3)
    components_per_batch_size = [
        IncrementalPCA(n_components=None, batch_size=size).fit(data).components_
        for size in np.arange(10, 20)]
    for prev, curr in zip(components_per_batch_size[:-1],
                          components_per_batch_size[1:]):
        assert_almost_equal(np.sign(prev), np.sign(curr), decimal=6)
def test_incremental_pca_batch_values():
    # The values of components_ must be stable (to 1 decimal) across batch sizes.
    rng = np.random.RandomState(1999)
    data = rng.randn(100, 3)
    components_per_batch_size = [
        IncrementalPCA(n_components=None, batch_size=size).fit(data).components_
        for size in np.arange(20, 40, 3)]
    for prev, curr in zip(components_per_batch_size[:-1],
                          components_per_batch_size[1:]):
        assert_almost_equal(prev, curr, decimal=1)
def test_incremental_pca_partial_fit():
    # Test that fit and partial_fit get equivalent results.
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p) # spherical data
    X[:, 1] *= .00001 # make middle component relatively small
    X += [5, 4, 3] # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    batch_size = 10
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
    pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    # Feed the same data manually in batch_size-sized slices.
    # Add one to make sure endpoint is included
    batch_itr = np.arange(0, n + 1, batch_size)
    for i, j in zip(batch_itr[:-1], batch_itr[1:]):
        pipca.partial_fit(X[i:j, :])
    # The manually-batched model should match the fit() model closely.
    assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
    # IncrementalPCA should approximate PCA on iris, up to a sign flip.
    data = iris.data
    batch_projection = PCA(n_components=2).fit_transform(data)
    incremental_projection = IncrementalPCA(
        n_components=2, batch_size=25).fit_transform(data)
    assert_almost_equal(np.abs(batch_projection),
                        np.abs(incremental_projection), 1)
def test_incremental_pca_against_pca_random_data():
    # IncrementalPCA should approximate PCA on random data, up to a sign flip.
    rng = np.random.RandomState(1999)
    data = rng.randn(100, 3) + 5 * rng.rand(1, 3)
    batch_projection = PCA(n_components=3).fit_transform(data)
    incremental_projection = IncrementalPCA(
        n_components=3, batch_size=25).fit_transform(data)
    assert_almost_equal(np.abs(batch_projection),
                        np.abs(incremental_projection), 1)
def test_explained_variances():
    # Test that PCA and IncrementalPCA variance calculations match
    X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
                                      effective_rank=10, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    # Check both "keep everything" and an explicit component count.
    for nc in [None, 99]:
        pca = PCA(n_components=nc).fit(X)
        ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
        assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
                            decimal=prec)
        assert_almost_equal(pca.explained_variance_ratio_,
                            ipca.explained_variance_ratio_, decimal=prec)
        assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
                            decimal=prec)
def test_whitening():
    # Test that whitened PCA and IncrementalPCA transforms match to sign flip.
    X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
                                      effective_rank=2, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    for nc in [None, 9]:
        pca = PCA(whiten=True, n_components=nc).fit(X)
        ipca = IncrementalPCA(whiten=True, n_components=nc,
                              batch_size=250).fit(X)
        Xt_pca = pca.transform(X)
        Xt_ipca = ipca.transform(X)
        # Transforms agree up to sign.
        assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
        # Both inverse transforms recover X, and agree with each other.
        Xinv_ipca = ipca.inverse_transform(Xt_ipca)
        Xinv_pca = pca.inverse_transform(Xt_pca)
        assert_almost_equal(X, Xinv_ipca, decimal=prec)
        assert_almost_equal(X, Xinv_pca, decimal=prec)
        assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
|
bsd-3-clause
|
aidanheerdegen/MOM6-examples
|
tools/analysis/m6toolbox.py
|
1
|
5877
|
"""
A collection of useful functions...
"""
import numpy as np
def section2quadmesh(x, z, q, representation='pcm'):
  """
  Creates the appropriate quadmesh coordinates to plot a scalar q(1:nk,1:ni) at
  horizontal positions x(1:ni+1) and between interfaces at z(nk+1,ni), using
  various representations of the topography.

  Returns X, Z, Q to be passed to pcolormesh. For representation='linear' the
  shapes are X(2*ni+1), Z(nk+1,2*ni+1), Q(nk,2*ni); for 'pcm' and 'plm' they
  are X(2*ni), Z(nk+1,2*ni), Q(nk,2*ni-1).

  TBD: Optionally, x can be dimensioned as x(ni) in which case it will be extrapolated as if it
  had dimensions x(ni+1).

  Optional argument:

  representation='pcm' (default) yields a step-wise visualization, appropriate for
           z-coordinate models.
  representation='plm' yields a piecewise-linear visualization more representative
           of general-coordinate (and isopycnal) models.
  representation='linear' is the aesthetically most pleasing but does not
           represent the data conservatively.

  Raises Exception if the argument dimensions are inconsistent or the
  representation is unknown.
  """
  # Argument consistency checks
  if x.ndim!=1: raise Exception('The x argument must be a vector')
  if z.ndim!=2: raise Exception('The z argument should be a 2D array')
  # Fixed: this message used to (incorrectly) refer to z instead of q.
  if q.ndim!=2: raise Exception('The q argument should be a 2D array')
  qnk, qni = q.shape
  znk, zni = z.shape
  xni = x.size
  if zni!=qni: raise Exception('The last dimension of z and q must be equal in length')
  if znk!=qnk+1: raise Exception('The first dimension of z must be 1 longer than that of q. q has %i levels'%qnk)
  if xni!=qni+1: raise Exception('The length of x must be 1 longer than the last dimension of q')
  # Fill masked values so they do not leak into the plotted coordinates.
  if type( z ) == np.ma.core.MaskedArray: z[z.mask] = 0
  if type( q ) == np.ma.core.MaskedArray: qmin = np.amin(q); q[q.mask] = qmin
  periodicDomain =  abs((x[-1]-x[0])-360. ) < 1e-6 # Detect if horizontal axis is a periodic domain
  if representation=='pcm':
    # Piecewise-constant: duplicate each column edge and interface.
    X = np.zeros((2*qni))
    X[::2] = x[:-1]
    X[1::2] = x[1:]
    Z = np.zeros((qnk+1,2*qni))
    Z[:,::2] = z
    Z[:,1::2] = z
    Q = np.zeros((qnk,2*qni-1))
    Q[:,::2] = q
    Q[:,1::2] = ( q[:,:-1] + q[:,1:] )/2.
  elif representation=='linear':
    # Linear: insert cell-center nodes between the original edges.
    X = np.zeros((2*qni+1))
    X[::2] = x
    X[1::2] = ( x[0:-1] + x[1:] )/2.
    Z = np.zeros((qnk+1,2*qni+1))
    Z[:,1::2] = z
    Z[:,2:-1:2] = ( z[:,0:-1] + z[:,1:] )/2.
    Z[:,0] = z[:,0]
    Z[:,-1] = z[:,-1]
    Q = np.zeros((qnk,2*qni))
    Q[:,::2] = q
    Q[:,1::2] = q
  elif representation=='plm':
    X = np.zeros((2*qni))
    X[::2] = x[:-1]
    X[1::2] = x[1:]
    # PLM reconstruction for Z
    dz = np.roll(z,-1,axis=1) - z # Right-sided difference
    if not periodicDomain: dz[:,-1] = 0 # Non-periodic boundary
    # (A redundant pre-assignment of d2 from np.roll(z,...) was removed here;
    # it was dead code, immediately overwritten by the line below.)
    d2 = ( dz + np.roll(dz,1,axis=1) )/2. # Centered difference
    s = np.sign( d2 ) # Sign of centered slope
    s[dz * np.roll(dz,1,axis=1) <= 0] = 0 # Flatten extrema
    dz = np.abs(dz) # Only need magnitude from here on
    S = s * np.minimum( np.abs(d2), np.minimum( dz, np.roll(dz,1,axis=1) ) ) # PLM slope
    Z = np.zeros((qnk+1,2*qni))
    Z[:,::2] = z - S/2.
    Z[:,1::2] = z + S/2.
    Q = np.zeros((qnk,2*qni-1))
    Q[:,::2] = q
    Q[:,1::2] = ( q[:,:-1] + q[:,1:] )/2.
  else: raise Exception('Unknown representation!')
  return X, Z, Q
def rho_Wright97(S, T, P=0):
  """
  Returns the density of seawater for the given salinity, potential temperature
  and pressure.

  Units: salinity in PSU, potential temperature in degrees Celsius and pressure in Pascals.

  Evaluates rho = (P + p0) / (lambda + alpha0*(P + p0)) with the polynomial
  fits for alpha0, p0 and lambda (per the function name, presumably the
  Wright 1997 equation of state).
  """
  # Polynomial fit coefficients.
  a0, a1, a2 = 7.057924e-4, 3.480336e-7, -1.112733e-7
  b0, b1, b2 = 5.790749e8, 3.516535e6, -4.002714e4
  b3, b4, b5 = 2.084372e2, 5.944068e5, -9.643486e3
  c0, c1, c2 = 1.704853e5, 7.904722e2, -7.984422
  c3, c4, c5 = 5.140652e-2, -2.302158e2, -3.079464
  alpha0 = a0 + a1*T + a2*S
  p0 = b0 + b4*S + T * (b1 + T*(b2 + b3*T) + b5*S)
  lam = c0 + c4*S + T * (c1 + T*(c2 + c3*T) + c5*S)
  total_pressure = P + p0
  return total_pressure / (lam + alpha0*total_pressure)
def ice9(i, j, source, xcyclic=True, tripolar=True):
  """
  An iterative (stack based) implementation of "Ice 9".

  The flood fill starts at [j,i] and treats any positive value of "source" as
  passable. Zero and negative values block flooding.

  xcyclic = True allows cyclic behavior in the last index. (default)
  tripolar = True allows a fold across the top-most edge. (default)

  Returns an array of 0's and 1's.
  """
  wetMask = 0*source
  (nj,ni) = wetMask.shape
  stack = set()
  stack.add( (j,i) )
  while stack:
    (j,i) = stack.pop()
    if wetMask[j,i] or source[j,i] <= 0: continue
    wetMask[j,i] = 1
    if i>0: stack.add( (j,i-1) )
    elif xcyclic: stack.add( (j,ni-1) ) # Cyclic wrap westward
    if i<ni-1: stack.add( (j,i+1) )
    # Bug fix: eastward cyclic wrap used to push (0,j) — a transposed index
    # that jumped to row 0 instead of wrapping to column 0 of the same row.
    elif xcyclic: stack.add( (j,0) )
    if j>0: stack.add( (j-1,i) )
    if j<nj-1: stack.add( (j+1,i) )
    elif tripolar: stack.add( (j,ni-1-i) ) # Tri-polar fold
  return wetMask
def maskFromDepth(depth, zCellTop):
  """
  Generates a "wet mask" for a z-coordinate model based on relative location of
  the ocean bottom to the upper interface of the cell.

  depth (2d) is positive.
  zCellTop (scalar) is a negative position of the upper interface of the cell.
  """
  mask = 0 * depth
  deep_enough = depth > -zCellTop  # ocean floor lies below this cell's top
  mask[deep_enough] = 1
  return mask
# Tests: plot the same sample section with all three quadmesh representations.
if __name__ == '__main__':
  import matplotlib.pyplot as plt
  import numpy.matlib
  # Test data: 4 columns, 3 layers of random values between sloping interfaces.
  x=np.arange(5)
  z=np.array([[0,0.2,0.3,-.1],[1,1.5,.7,.4],[2,2,1.5,2],[3,2.3,1.5,2.1]])*-1
  q=np.matlib.rand(3,4)
  print('x=',x)
  print('z=',z)
  print('q=',q)
  # Default 'pcm' (piecewise-constant) representation
  X, Z, Q = section2quadmesh(x, z, q)
  print('X=',X)
  print('Z=',Z)
  print('Q=',Q)
  plt.subplot(3,1,1)
  plt.pcolormesh(X, Z, Q)
  # 'linear' representation
  X, Z, Q = section2quadmesh(x, z, q, representation='linear')
  print('X=',X)
  print('Z=',Z)
  print('Q=',Q)
  plt.subplot(3,1,2)
  plt.pcolormesh(X, Z, Q)
  # 'plm' (piecewise-linear) representation
  X, Z, Q = section2quadmesh(x, z, q, representation='plm')
  print('X=',X)
  print('Z=',Z)
  print('Q=',Q)
  plt.subplot(3,1,3)
  plt.pcolormesh(X, Z, Q)
  plt.show()
|
gpl-3.0
|
kdebrab/pandas
|
pandas/tests/indexes/multi/test_partial_indexing.py
|
6
|
3298
|
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, MultiIndex, date_range
def test_partial_string_timestamp_multiindex():
    # GH10331: partial string indexing on the datetime level of a MultiIndex.
    dr = pd.date_range('2016-01-01', '2016-01-03', freq='12H')
    abc = ['a', 'b', 'c']
    ix = pd.MultiIndex.from_product([dr, abc])
    df = pd.DataFrame({'c1': range(0, 15)}, index=ix)
    idx = pd.IndexSlice
    # The frame being indexed:
    #                        c1
    # 2016-01-01 00:00:00 a   0
    #                     b   1
    #                     c   2
    # 2016-01-01 12:00:00 a   3
    #                     b   4
    #                     c   5
    # 2016-01-02 00:00:00 a   6
    #                     b   7
    #                     c   8
    # 2016-01-02 12:00:00 a   9
    #                     b  10
    #                     c  11
    # 2016-01-03 00:00:00 a  12
    #                     b  13
    #                     c  14
    # partial string matching on a single index, for every swaplevel spelling
    for df_swap in (df.swaplevel(),
                    df.swaplevel(0),
                    df.swaplevel(0, 1)):
        df_swap = df_swap.sort_index()
        just_a = df_swap.loc['a']
        result = just_a.loc['2016-01-01']
        expected = df.loc[idx[:, 'a'], :].iloc[0:2]
        expected.index = expected.index.droplevel(1)
        tm.assert_frame_equal(result, expected)
    # NOTE: from here on, df_swap refers to the last (sorted) variant above.
    # indexing with IndexSlice
    result = df.loc[idx['2016-01-01':'2016-02-01', :], :]
    expected = df
    tm.assert_frame_equal(result, expected)
    # match on secondary index
    result = df_swap.loc[idx[:, '2016-01-01':'2016-01-01'], :]
    expected = df_swap.iloc[[0, 1, 5, 6, 10, 11]]
    tm.assert_frame_equal(result, expected)
    # Even though this syntax works on a single index, this is somewhat
    # ambiguous and we don't want to extend this behavior forward to work
    # in multi-indexes. This would amount to selecting a scalar from a
    # column.
    with pytest.raises(KeyError):
        df['2016-01-01']
    # partial string match on year only
    result = df.loc['2016']
    expected = df
    tm.assert_frame_equal(result, expected)
    # partial string match on date
    result = df.loc['2016-01-01']
    expected = df.iloc[0:6]
    tm.assert_frame_equal(result, expected)
    # partial string match on date and hour, from middle
    result = df.loc['2016-01-02 12']
    expected = df.iloc[9:12]
    tm.assert_frame_equal(result, expected)
    # partial string match on secondary index
    result = df_swap.loc[idx[:, '2016-01-02'], :]
    expected = df_swap.iloc[[2, 3, 7, 8, 12, 13]]
    tm.assert_frame_equal(result, expected)
    # tuple selector with partial string match on date
    result = df.loc[('2016-01-01', 'a'), :]
    expected = df.iloc[[0, 3]]
    tm.assert_frame_equal(result, expected)
    # Slicing date on first level should break (of course) because in the
    # swapped frame the outer level holds the letters, not the dates.
    with pytest.raises(KeyError):
        df_swap.loc['2016-01-01']
    # GH12685 (partial string with daily resolution or below)
    dr = date_range('2013-01-01', periods=100, freq='D')
    ix = MultiIndex.from_product([dr, ['a', 'b']])
    df = DataFrame(np.random.randn(200, 1), columns=['A'], index=ix)
    result = df.loc[idx['2013-03':'2013-03', :], :]
    expected = df.iloc[118:180]
    tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
pgleeson/neurotune
|
examples/example_2/optimization.py
|
1
|
13662
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Automated optimization of simulation of Basket cell
"""
__version__ = 0.1
from neuron import h
import neuron
import numpy as np
from neurotune import optimizers
from neurotune import evaluators
from matplotlib import pyplot as plt
from pyelectro import analysis
class Simulation(object):
    """
    Simulation class - inspired by example of Philipp Rautenberg

    Objects of this class control a current clamp simulation. Example of use:

        >>> cell = Cell() # some kind of NEURON section
        >>> sim = Simulation(cell)
        >>> sim.go()
        >>> sim.show()
    """
    def __init__(self, recording_section, sim_time=1000, dt=0.05, v_init=-60):
        # recording_section: NEURON section whose midpoint voltage is recorded
        # sim_time [ms], dt [ms], v_init [mV]
        self.recording_section = recording_section
        self.sim_time = sim_time
        self.dt = dt
        self.go_already = False
        self.v_init=v_init
    def set_IClamp(self, delay=5, amp=0.1, dur=1000):
        """
        Initializes values for current clamp.

        Default values:
          delay = 5 [ms]
          amp = 0.1 [nA]
          dur = 1000 [ms]
        """
        # Clamp is placed at the midpoint (0.5) of the recording section.
        stim = h.IClamp(self.recording_section(0.5))
        stim.delay = delay
        stim.amp = amp
        stim.dur = dur
        self.stim = stim
    def set_recording(self):
        """Attach NEURON Vectors to record time and midpoint voltage."""
        # Record Time
        self.rec_t = neuron.h.Vector()
        self.rec_t.record(h._ref_t)
        # Record Voltage
        self.rec_v = h.Vector()
        self.rec_v.record(self.recording_section(0.5)._ref_v)
    def show(self):
        """
        Plot the recorded voltage trace once the simulation has been run.

        Note: plt.show() is called even if go() has not run yet (a warning
        is printed in that case).
        """
        from matplotlib import pyplot as plt
        if self.go_already:
            x = np.array(self.rec_t)
            y = np.array(self.rec_v)
            plt.plot(x, y)
            plt.title("Simulation voltage vs time")
            plt.xlabel("Time [ms]")
            plt.ylabel("Voltage [mV]")
        else:
            print("""First you have to `go()` the simulation.""")
        plt.show()
    def go(self, sim_time=None):
        """
        Run the simulation.

        If sim_time is falsy (None or 0), self.sim_time is used instead.
        """
        self.set_recording()
        h.dt = self.dt
        # Initialize membrane potential everywhere before running.
        h.finitialize(self.v_init)
        neuron.init()
        if sim_time:
            neuron.run(sim_time)
        else:
            neuron.run(self.sim_time)
        self.go_already = True
class BasketCellController():
    """
    This is a canonical example of a controller class

    It provides a run() method, this run method must accept at least two parameters:
        1. candidates (list of list of numbers)
        2. The corresponding parameters.
    """
    def run(self,candidates,parameters):
        """
        Run simulation for each candidate

        This run method will loop through each candidate and run the simulation
        corresponding to its parameter values. It will populate an array called
        traces with the resulting voltage traces for the simulation and return it.
        """
        traces = []
        for candidate in candidates:
            # Pair each parameter name with the candidate's value for it.
            sim_var = dict(zip(parameters,candidate))
            t,v = self.run_individual(sim_var)
            traces.append([t,v])
        return traces
    def set_section_mechanism(self, sec, mech, mech_attribute, mech_value):
        """
        Set the value of an attribute of a NEURON section

        Applies mech_attribute = mech_value on mechanism mech for every
        segment of section sec.
        """
        for seg in sec:
            setattr(getattr(seg, mech), mech_attribute, mech_value)
    def run_individual(self,sim_var,show=False):
        """
        Run an individual simulation.

        The candidate data has been flattened into the sim_var dict. The
        sim_var dict contains parameter:value key value pairs, which are
        applied to the model before it is simulated.

        The simulation itself is carried out via the instantiation of a
        Simulation object (see Simulation class above).

        Returns (t, v) as numpy arrays.
        """
        #make compartments and connect them
        soma=h.Section()
        axon=h.Section()
        soma.connect(axon)
        # Insert the ion-channel mechanisms into both sections.
        axon.insert('na')
        axon.insert('kv')
        axon.insert('kv_3')
        soma.insert('na')
        soma.insert('kv')
        soma.insert('kv_3')
        # Geometry (NEURON units: microns).
        soma.diam=10
        soma.L=10
        axon.diam=2
        axon.L=100
        #soma.insert('canrgc')
        #soma.insert('cad2')
        # Apply the candidate's conductance densities.
        self.set_section_mechanism(axon,'na','gbar',sim_var['axon_gbar_na'])
        self.set_section_mechanism(axon,'kv','gbar',sim_var['axon_gbar_kv'])
        self.set_section_mechanism(axon,'kv_3','gbar',sim_var['axon_gbar_kv3'])
        self.set_section_mechanism(soma,'na','gbar',sim_var['soma_gbar_na'])
        self.set_section_mechanism(soma,'kv','gbar',sim_var['soma_gbar_kv'])
        self.set_section_mechanism(soma,'kv_3','gbar',sim_var['soma_gbar_kv3'])
        # Passive properties are identical for all sections.
        for sec in h.allsec():
            sec.insert('pas')
            sec.Ra=300
            sec.cm=0.75
            self.set_section_mechanism(sec,'pas','g',1.0/30000)
            self.set_section_mechanism(sec,'pas','e',-70)
        h.vshift_na=-5.0
        # 1 s simulation with a 750 ms, 0.1 nA current step starting at 150 ms.
        sim=Simulation(soma,sim_time=1000,v_init=-70.0)
        sim.set_IClamp(150, 0.1, 750)
        sim.go()
        if show:
            sim.show()
        return np.array(sim.rec_t), np.array(sim.rec_v)
def main(targets,
         population_size=100,
         max_evaluations=100,
         num_selected=5,
         num_offspring=5,
         seeds=None):
    """
    Run the optimization.

    Builds a BasketCellController, an IClampEvaluator configured against the
    given targets, and a CustomOptimizerA, then returns the best candidate
    found after at most max_evaluations evaluations.
    """
    # Controller that actually runs the NEURON simulations.
    controller = BasketCellController()
    # Conductance-density parameters modified in each simulation.
    parameters = ['axon_gbar_na',
                  'axon_gbar_kv',
                  'axon_gbar_kv3',
                  'soma_gbar_na',
                  'soma_gbar_kv',
                  'soma_gbar_kv3']
    # Search bounds, in the same order as `parameters`.
    min_constraints = [0, 0, 0, 0, 0, 0]
    max_constraints = [10000, 30, 1, 300, 20, 2]
    # EXAMPLE - how to set a seed
    #manual_vals=[50,50,2000,70,70,5,0.1,28.0,49.0,-73.0,23.0]
    # Waveform-analysis settings; these default values will do.
    analysis_var = {'peak_delta': 1e-4, 'baseline': 0, 'dvdt_threshold': 0.0}
    # Every fitness component contributes with equal weight.
    fitness_keys = ['average_minimum',
                    'spike_frequency_adaptation',
                    'trough_phase_adaptation',
                    'mean_spike_frequency',
                    'average_maximum',
                    'trough_decay_exponent',
                    'interspike_time_covar',
                    'min_peak_no',
                    'spike_broadening',
                    'spike_width_adaptation',
                    'max_peak_no',
                    'first_spike_time',
                    'peak_decay_exponent',
                    'pptd_error']
    weights = {key: 1.0 for key in fitness_keys}
    data = './100pA_1.csv'
    print('data location')
    print(data)
    # Evaluator using explicit (non-automatic) target values.
    evaluator = evaluators.IClampEvaluator(controller=controller,
                                           analysis_start_time=0,
                                           analysis_end_time=900,
                                           target_data_path=data,
                                           parameters=parameters,
                                           analysis_var=analysis_var,
                                           weights=weights,
                                           targets=targets,
                                           automatic=False)
    optimizer = optimizers.CustomOptimizerA(max_constraints,
                                            min_constraints,
                                            evaluator,
                                            population_size=population_size,
                                            max_evaluations=max_evaluations,
                                            num_selected=num_selected,
                                            num_offspring=num_offspring,
                                            num_elites=1,
                                            mutation_rate=0.5,
                                            seeds=seeds,
                                            verbose=True)
    best_candidate, fitness = optimizer.optimize(do_plot=False)
    return best_candidate
#Instantiate a simulation controller to run simulations
controller = BasketCellController()
#surrogate simulation variables: the "true" parameter values the optimizer
#should recover
sim_var = {'axon_gbar_na': 3661.79,
           'axon_gbar_kv': 23.23,
           'axon_gbar_kv3': 0.26,
           'soma_gbar_na': 79.91,
           'soma_gbar_kv': 0.58,
           'soma_gbar_kv3': 1.57}
parameters = ['axon_gbar_na',
              'axon_gbar_kv',
              'axon_gbar_kv3',
              'soma_gbar_na',
              'soma_gbar_kv',
              'soma_gbar_kv3']
#This seed should always "win" because it is the solution.
#dud_seed = [3661.79, 23.23, 0.26, 79.91, 0.58, 1.57]
surrogate_t, surrogate_v = controller.run_individual(sim_var,show=False)
analysis_var={'peak_delta':1e-4,'baseline':0,'dvdt_threshold':0.0}
surrogate_analysis=analysis.IClampAnalysis(surrogate_v,
                                           surrogate_t,
                                           analysis_var,
                                           start_analysis=0,
                                           end_analysis=900,
                                           smooth_data=False,
                                           show_smoothed_data=False)
# The output of the analysis will serve as the basis for model optimization:
surrogate_targets = surrogate_analysis.analyse()
# The surrogate trace is expected to contain exactly 13 spikes.
assert(surrogate_targets['max_peak_no'] == 13)
weights={'average_minimum': 1.0,
         'spike_frequency_adaptation': 1.0,
         'trough_phase_adaptation': 1.0,
         'mean_spike_frequency': 1.0,
         'average_maximum': 1.0,
         'trough_decay_exponent': 1.0,
         'interspike_time_covar': 1.0,
         'min_peak_no': 1.0,
         'spike_broadening': 1.0,
         'spike_width_adaptation': 1.0,
         'max_peak_no': 1.0,
         'first_spike_time': 1.0,
         'peak_decay_exponent': 1.0,
         'pptd_error':1.0}
#Sanity check - fitness of the surrogate against its own targets must be 0
fitness_value = surrogate_analysis.evaluate_fitness(surrogate_targets,
                                                    weights,
                                                    cost_function = analysis.normalised_cost_function)
assert(fitness_value == 0.0)
#raw_input()
# First optimization run: small population, few evaluations.
c1_evals = 100
#Now try and get that candidate back, using the obtained targets:
candidate1 = main(surrogate_targets,
                  population_size=10,
                  max_evaluations=c1_evals,
                  num_selected=5,
                  num_offspring=10,
                  seeds=None)
# Second run: larger population and evaluation budget for comparison.
c2_evals = 500
candidate2 = main(surrogate_targets,
                  population_size=30,
                  max_evaluations=c2_evals,
                  num_selected=5,
                  num_offspring=10,
                  seeds=None)
# Re-simulate both recovered candidates to compare their traces.
sim_var1={}
for key,value in zip(parameters,candidate1):
    sim_var1[key]=value
candidate1_t,candidate1_v = controller.run_individual(sim_var1,show=False)
sim_var2={}
for key,value in zip(parameters,candidate2):
    sim_var2[key]=value
candidate2_t,candidate2_v = controller.run_individual(sim_var2,show=False)
candidate1_analysis=analysis.IClampAnalysis(candidate1_v,
                                            candidate1_t,
                                            analysis_var,
                                            start_analysis=0,
                                            end_analysis=900,
                                            smooth_data=False,
                                            show_smoothed_data=False)
candidate1_analysis_results = candidate1_analysis.analyse()
candidate2_analysis = analysis.IClampAnalysis(candidate2_v,
                                              candidate2_t,
                                              analysis_var,
                                              start_analysis=0,
                                              end_analysis=900,
                                              smooth_data=False,
                                              show_smoothed_data=False)
candidate2_analysis_results = candidate2_analysis.analyse()
# Report the analysis of each candidate next to the surrogate ground truth.
print('----------------------------------------')
print('Candidate 1 Analysis Results:')
print(candidate1_analysis_results)
print('Candidate 1 values:')
print(candidate1)
print('----------------------------------------')
print('Candidate 2 Analysis Results:')
print(candidate2_analysis_results)
print('Candidate 2 values:')
print(candidate2)
print('----------------------------------------')
print('Surrogate Targets:')
print(surrogate_targets)
print('Surrogate values:')
print(sim_var)
print('----------------------------------------')
#plotting: overlay the surrogate and the two recovered candidates
surrogate_plot, = plt.plot(np.array(surrogate_t),np.array(surrogate_v))
candidate1_plot, = plt.plot(np.array(candidate1_t),np.array(candidate1_v))
candidate2_plot, = plt.plot(np.array(candidate2_t),np.array(candidate2_v))
plt.legend([surrogate_plot,candidate1_plot,candidate2_plot],
           ["Surrogate model","Best model - %i evaluations"%c1_evals,"Best model - %i evaluations candidate"%c2_evals])
plt.ylim(-80.0,80.0)
plt.xlim(0.0,1000.0)
plt.title("Models optimized from surrogate solutions")
plt.xlabel("Time (ms)")
plt.ylabel("Membrane potential(mV)")
plt.savefig("surrogate_vs_candidates.png",bbox_inches='tight',format='png')
plt.show()
|
bsd-3-clause
|
SaikWolf/gnuradio
|
gr-utils/python/utils/plot_psd_base.py
|
75
|
12725
|
#!/usr/bin/env python
#
# Copyright 2007,2008,2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
from scipy import log10
from gnuradio.eng_option import eng_option
class plot_psd_base:
    """Interactive viewer for a GNU Radio binary capture file (Python 2).

    Shows the I&Q time series and its power spectral density (PSD) one
    block at a time, optionally with a spectrogram, and provides "<"/">"
    buttons plus arrow keys to step backward/forward through the file.
    """

    def __init__(self, datatype, filename, options):
        # Open the capture and cache all command-line options.
        self.hfile = open(filename, "r")
        self.block_length = options.block
        self.start = options.start
        self.sample_rate = options.sample_rate
        self.psdfftsize = options.psd_size
        self.specfftsize = options.spec_size

        self.dospec = options.enable_spec  # if we want to plot the spectrogram

        self.datatype = getattr(scipy, datatype)        # e.g. scipy.complex64
        self.sizeof_data = self.datatype().nbytes       # number of bytes per sample in file

        # Font sizes shared by all subplots.
        self.axis_font_size = 16
        self.label_font_size = 18
        self.title_font_size = 20
        self.text_size = 22

        # Setup PLOT
        self.fig = figure(1, figsize=(16, 12), facecolor='w')
        rcParams['xtick.labelsize'] = self.axis_font_size
        rcParams['ytick.labelsize'] = self.axis_font_size

        # Static figure captions: file name, current position, block size, rate.
        self.text_file = figtext(0.10, 0.95, ("File: %s" % filename),
                                 weight="heavy", size=self.text_size)
        self.text_file_pos = figtext(0.10, 0.92, "File Position: ",
                                     weight="heavy", size=self.text_size)
        self.text_block = figtext(0.35, 0.92, ("Block Size: %d" % self.block_length),
                                  weight="heavy", size=self.text_size)
        self.text_sr = figtext(0.60, 0.915, ("Sample Rate: %.2f" % self.sample_rate),
                               weight="heavy", size=self.text_size)
        self.make_plots()

        # "<" / ">" buttons step backward / forward one block.
        self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
        self.button_left = Button(self.button_left_axes, "<")
        self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
        self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
        self.button_right = Button(self.button_right_axes, ">")
        self.button_right_callback = self.button_right.on_clicked(self.button_right_click)

        # Remember current x-limits so zoom() can detect a user zoom later.
        self.xlim = scipy.array(self.sp_iq.get_xlim())

        self.manager = get_current_fig_manager()
        connect('draw_event', self.zoom)
        connect('key_press_event', self.click)
        show()

    def get_data(self):
        """Read the next block of samples into self.iq/self.time/self.iq_psd.

        Returns True on success, False at end of file.
        """
        # Position is reported in samples, not bytes.
        self.position = self.hfile.tell()/self.sizeof_data
        self.text_file_pos.set_text("File Position: %d" % self.position)
        try:
            self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
        except MemoryError:
            print "End of File"
            return False
        else:
            # retesting length here as newer version of scipy does not throw a MemoryError, just
            # returns a zero-length array
            if(len(self.iq) > 0):
                tstep = 1.0 / self.sample_rate
                #self.time = scipy.array([tstep*(self.position + i) for i in xrange(len(self.iq))])
                self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))])

                self.iq_psd, self.freq = self.dopsd(self.iq)
                return True
            else:
                print "End of File"
                return False

    def dopsd(self, iq):
        ''' Need to do this here and plot later so we can do the fftshift '''
        overlap = self.psdfftsize/4
        winfunc = scipy.blackman
        psd,freq = mlab.psd(iq, self.psdfftsize, self.sample_rate,
                            window = lambda d: d*winfunc(self.psdfftsize),
                            noverlap = overlap)
        psd = 10.0*log10(abs(psd))  # linear power -> dB
        return (psd, freq)

    def make_plots(self):
        """Create the I&Q and PSD subplots (plus spectrogram if enabled)."""
        # if specified on the command-line, set file pointer
        self.hfile.seek(self.sizeof_data*self.start, 1)

        # Two layouts: index 0 = no spectrogram, index 1 = with spectrogram.
        iqdims = [[0.075, 0.2, 0.4, 0.6], [0.075, 0.55, 0.4, 0.3]]
        psddims = [[0.575, 0.2, 0.4, 0.6], [0.575, 0.55, 0.4, 0.3]]
        specdims = [0.2, 0.125, 0.6, 0.3]

        # Subplot for real and imaginary parts of signal
        self.sp_iq = self.fig.add_subplot(2,2,1, position=iqdims[self.dospec])
        self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
        self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
        self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")

        # Subplot for PSD plot
        self.sp_psd = self.fig.add_subplot(2,2,2, position=psddims[self.dospec])
        self.sp_psd.set_title(("PSD"), fontsize=self.title_font_size, fontweight="bold")
        self.sp_psd.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
        self.sp_psd.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")

        r = self.get_data()

        self.plot_iq = self.sp_iq.plot([], 'bo-')  # make plot for reals
        self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
        self.draw_time(self.time, self.iq)         # draw the plot

        self.plot_psd = self.sp_psd.plot([], 'b')  # make plot for PSD
        self.draw_psd(self.freq, self.iq_psd)      # draw the plot

        if self.dospec:
            # Subplot for spectrogram plot
            self.sp_spec = self.fig.add_subplot(2,2,3, position=specdims)
            self.sp_spec.set_title(("Spectrogram"), fontsize=self.title_font_size, fontweight="bold")
            self.sp_spec.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
            self.sp_spec.set_ylabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")

            self.draw_spec(self.time, self.iq)

        draw()

    def draw_time(self, t, iq):
        """Refresh the I&Q subplot with real/imag traces and rescale axes."""
        reals = iq.real
        imags = iq.imag
        self.plot_iq[0].set_data([t, reals])
        self.plot_iq[1].set_data([t, imags])
        self.sp_iq.set_xlim(t.min(), t.max())
        # 1.5x headroom around the data extremes.
        self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]),
                             1.5*max([reals.max(), imags.max()])])

    def draw_psd(self, f, p):
        """Refresh the PSD subplot with 10 dB of headroom above/below."""
        self.plot_psd[0].set_data([f, p])
        self.sp_psd.set_ylim([p.min()-10, p.max()+10])
        self.sp_psd.set_xlim([f.min(), f.max()])

    def draw_spec(self, t, s):
        """Redraw the spectrogram subplot from scratch for the current block."""
        overlap = self.specfftsize/4
        winfunc = scipy.blackman
        self.sp_spec.clear()
        self.sp_spec.specgram(s, self.specfftsize, self.sample_rate,
                              window = lambda d: d*winfunc(self.specfftsize),
                              noverlap = overlap, xextent=[t.min(), t.max()])

    def update_plots(self):
        """Redraw all subplots after a new block has been read."""
        self.draw_time(self.time, self.iq)
        self.draw_psd(self.freq, self.iq_psd)

        if self.dospec:
            self.draw_spec(self.time, self.iq)

        self.xlim = scipy.array(self.sp_iq.get_xlim()) # so zoom doesn't get called

        draw()

    def zoom(self, event):
        """draw_event handler: recompute the PSD over the zoomed time span."""
        newxlim = scipy.array(self.sp_iq.get_xlim())
        curxlim = scipy.array(self.xlim)
        # Only react when the limits actually changed (i.e. a user zoom).
        if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
            #xmin = max(0, int(ceil(self.sample_rate*(newxlim[0] - self.position))))
            #xmax = min(int(ceil(self.sample_rate*(newxlim[1] - self.position))), len(self.iq))
            xmin = max(0, int(ceil(self.sample_rate*(newxlim[0]))))
            xmax = min(int(ceil(self.sample_rate*(newxlim[1]))), len(self.iq))

            iq = scipy.array(self.iq[xmin : xmax])
            time = scipy.array(self.time[xmin : xmax])

            iq_psd, freq = self.dopsd(iq)

            self.draw_psd(freq, iq_psd)
            self.xlim = scipy.array(self.sp_iq.get_xlim())

            draw()

    def click(self, event):
        """key_press_event handler: arrow keys/space step through the file."""
        forward_valid_keys = [" ", "down", "right"]
        backward_valid_keys = ["up", "left"]

        # `find` is the module-level membership helper defined below.
        if(find(event.key, forward_valid_keys)):
            self.step_forward()

        elif(find(event.key, backward_valid_keys)):
            self.step_backward()

    def button_left_click(self, event):
        self.step_backward()

    def button_right_click(self, event):
        self.step_forward()

    def step_forward(self):
        """Read and display the next block (no-op at end of file)."""
        r = self.get_data()
        if(r):
            self.update_plots()

    def step_backward(self):
        # Step back in file position
        # Two blocks back: one to undo the last read, one to move backward.
        if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
            self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
        else:
            # Near the start of the file: rewind to position 0.
            self.hfile.seek(-self.hfile.tell(),1)
        r = self.get_data()
        if(r):
            self.update_plots()

    @staticmethod
    def setup_options():
        """Build and return the OptionParser for this tool's CLI."""
        usage="%prog: [options] input_filename"
        description = "Takes a GNU Radio binary file (with specified data type using --data-type) and displays the I&Q data versus time as well as the power spectral density (PSD) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples. Finally, the size of the FFT to use for the PSD and spectrogram plots can be set independently with --psd-size and --spec-size, respectively. The spectrogram plot does not display by default and is turned on with -S or --enable-spec."

        parser = OptionParser(option_class=eng_option, conflict_handler="resolve",
                              usage=usage, description=description)
        parser.add_option("-d", "--data-type", type="string", default="complex64",
                          help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
        parser.add_option("-B", "--block", type="int", default=8192,
                          help="Specify the block size [default=%default]")
        parser.add_option("-s", "--start", type="int", default=0,
                          help="Specify where to start in the file [default=%default]")
        parser.add_option("-R", "--sample-rate", type="eng_float", default=1.0,
                          help="Set the sampler rate of the data [default=%default]")
        parser.add_option("", "--psd-size", type="int", default=1024,
                          help="Set the size of the PSD FFT [default=%default]")
        parser.add_option("", "--spec-size", type="int", default=256,
                          help="Set the size of the spectrogram FFT [default=%default]")
        parser.add_option("-S", "--enable-spec", action="store_true", default=False,
                          help="Turn on plotting the spectrogram [default=%default]")

        return parser
def find(item_in, list_search):
    """Return True if *item_in* occurs in *list_search*, else False.

    Used by the key-press handler to test event.key against the lists of
    valid navigation keys.

    The original implementation called ``list_search.index(item_in) != None``
    inside a try/except: ``index`` returns an int (never None), so the
    comparison was always True on a hit.  The plain membership operator
    expresses that intent directly and works on both Python 2 and 3.
    """
    return item_in in list_search
def main():
    """Parse command-line arguments and launch the PSD viewer GUI."""
    parser = plot_psd_base.setup_options()
    (options, args) = parser.parse_args ()

    # Exactly one positional argument (the capture file) is required.
    if len(args) != 1:
        parser.print_help()
        raise SystemExit, 1  # Python 2 raise syntax (this file is Python 2)
    filename = args[0]

    # Constructing the object starts the (blocking) matplotlib GUI.
    dc = plot_psd_base(options.data_type, filename, options)
if __name__ == "__main__":
    # Run the viewer; Ctrl-C exits quietly instead of dumping a traceback.
    try:
        main()
    except KeyboardInterrupt:
        pass
|
gpl-3.0
|
ClimbsRocks/scikit-learn
|
sklearn/neighbors/tests/test_approximate.py
|
55
|
19053
|
"""
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
    # Checks whether accuracy increases as `n_candidates` increases.
    # Accuracy = mean overlap between LSHForest's k-NN and the exact
    # cosine-distance top-k, averaged over n_iter random queries.
    # NOTE(review): the first value (.1) looks like a deliberately
    # tiny/degenerate candidate budget — confirm intent.
    n_candidates_values = np.array([.1, 50, 500])
    n_samples = 100
    n_features = 10
    n_iter = 10
    n_points = 5
    rng = np.random.RandomState(42)
    accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
    X = rng.rand(n_samples, n_features)

    for i, n_candidates in enumerate(n_candidates_values):
        lshf = LSHForest(n_candidates=n_candidates)
        ignore_warnings(lshf.fit)(X)
        for j in range(n_iter):
            # Query with a random training point; compare against brute force.
            query = X[rng.randint(0, n_samples)].reshape(1, -1)
            neighbors = lshf.kneighbors(query, n_neighbors=n_points,
                                        return_distance=False)
            distances = pairwise_distances(query, X, metric='cosine')
            ranks = np.argsort(distances)[0, :n_points]

            intersection = np.intersect1d(ranks, neighbors).shape[0]
            ratio = intersection / float(n_points)
            accuracies[i] = accuracies[i] + ratio

        accuracies[i] = accuracies[i] / float(n_iter)
    # Sorted accuracies should be equal to original accuracies
    assert_true(np.all(np.diff(accuracies) >= 0),
                msg="Accuracies are not non-decreasing.")
    # Highest accuracy should be strictly greater than the lowest
    assert_true(np.ptp(accuracies) > 0,
                msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
    # Checks whether accuracy increases as `n_estimators` increases.
    # Mirrors test_neighbors_accuracy_with_n_candidates but varies the
    # number of trees at a fixed (large) candidate budget.
    n_estimators = np.array([1, 10, 100])
    n_samples = 100
    n_features = 10
    n_iter = 10
    n_points = 5
    rng = np.random.RandomState(42)
    accuracies = np.zeros(n_estimators.shape[0], dtype=float)
    X = rng.rand(n_samples, n_features)

    for i, t in enumerate(n_estimators):
        lshf = LSHForest(n_candidates=500, n_estimators=t)
        ignore_warnings(lshf.fit)(X)
        for j in range(n_iter):
            # Query with a random training point; compare against brute force.
            query = X[rng.randint(0, n_samples)].reshape(1, -1)
            neighbors = lshf.kneighbors(query, n_neighbors=n_points,
                                        return_distance=False)
            distances = pairwise_distances(query, X, metric='cosine')
            ranks = np.argsort(distances)[0, :n_points]

            intersection = np.intersect1d(ranks, neighbors).shape[0]
            ratio = intersection / float(n_points)
            accuracies[i] = accuracies[i] + ratio

        accuracies[i] = accuracies[i] / float(n_iter)
    # Sorted accuracies should be equal to original accuracies
    assert_true(np.all(np.diff(accuracies) >= 0),
                msg="Accuracies are not non-decreasing.")
    # Highest accuracy should be strictly greater than the lowest
    assert_true(np.ptp(accuracies) > 0,
                msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
    # Checks whether desired number of neighbors are returned.
    # It is guaranteed to return the requested number of neighbors
    # if `min_hash_match` is set to 0. Returned distances should be
    # in ascending order.
    n_samples = 12
    n_features = 2
    n_iter = 10
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)

    lshf = LSHForest(min_hash_match=0)
    # Test unfitted estimator
    assert_raises(ValueError, lshf.kneighbors, X[0])

    ignore_warnings(lshf.fit)(X)

    for i in range(n_iter):
        # Random query point and random n_neighbors each iteration.
        n_neighbors = rng.randint(0, n_samples)
        query = X[rng.randint(0, n_samples)].reshape(1, -1)
        neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
                                    return_distance=False)
        # Desired number of neighbors should be returned.
        assert_equal(neighbors.shape[1], n_neighbors)

    # Multiple points
    n_queries = 5
    queries = X[rng.randint(0, n_samples, n_queries)]
    distances, neighbors = lshf.kneighbors(queries,
                                           n_neighbors=1,
                                           return_distance=True)
    assert_equal(neighbors.shape[0], n_queries)
    assert_equal(distances.shape[0], n_queries)
    # Test only neighbors
    neighbors = lshf.kneighbors(queries, n_neighbors=1,
                                return_distance=False)
    assert_equal(neighbors.shape[0], n_queries)
    # Test random point(not in the data set)
    query = rng.randn(n_features).reshape(1, -1)
    lshf.kneighbors(query, n_neighbors=1,
                    return_distance=False)
    # Test n_neighbors at initialization
    # (default n_neighbors for LSHForest is 5)
    neighbors = lshf.kneighbors(query, return_distance=False)
    assert_equal(neighbors.shape[1], 5)
    # Test `neighbors` has an integer dtype
    assert_true(neighbors.dtype.kind == 'i',
                msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether Returned distances are less than `radius`
    # At least one point should be returned when the `radius` is set
    # to mean distance from the considering point to other points in
    # the database.
    # Moreover, this test compares the radius neighbors of LSHForest
    # with the `sklearn.neighbors.NearestNeighbors`.
    n_samples = 12
    n_features = 2
    n_iter = 10
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)

    lshf = LSHForest()
    # Test unfitted estimator
    assert_raises(ValueError, lshf.radius_neighbors, X[0])

    ignore_warnings(lshf.fit)(X)

    for i in range(n_iter):
        # Select a random point in the dataset as the query
        query = X[rng.randint(0, n_samples)].reshape(1, -1)

        # At least one neighbor should be returned when the radius is the
        # mean distance from the query to the points of the dataset.
        mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
        neighbors = lshf.radius_neighbors(query, radius=mean_dist,
                                          return_distance=False)

        # Result is a length-1 object array (one entry per query row).
        assert_equal(neighbors.shape, (1,))
        assert_equal(neighbors.dtype, object)
        assert_greater(neighbors[0].shape[0], 0)
        # All distances to points in the results of the radius query should
        # be less than mean_dist
        distances, neighbors = lshf.radius_neighbors(query,
                                                     radius=mean_dist,
                                                     return_distance=True)
        assert_array_less(distances[0], mean_dist)

    # Multiple points
    n_queries = 5
    queries = X[rng.randint(0, n_samples, n_queries)]
    distances, neighbors = lshf.radius_neighbors(queries,
                                                 return_distance=True)

    # dists and inds should not be 1D arrays or arrays of variable lengths
    # hence the use of the object dtype.
    assert_equal(distances.shape, (n_queries,))
    assert_equal(distances.dtype, object)
    assert_equal(neighbors.shape, (n_queries,))
    assert_equal(neighbors.dtype, object)

    # Compare with exact neighbor search
    query = X[rng.randint(0, n_samples)].reshape(1, -1)
    mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)

    distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
    distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)

    # Radius-based queries do not sort the result points and the order
    # depends on the method, the random_state and the dataset order. Therefore
    # we need to sort the results ourselves before performing any comparison.
    sorted_dists_exact = np.sort(distances_exact[0])
    sorted_dists_approx = np.sort(distances_approx[0])

    # Distances to exact neighbors are less than or equal to approximate
    # counterparts as the approximate radius query might have missed some
    # closer neighbors.
    assert_true(np.all(np.less_equal(sorted_dists_exact,
                                     sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
    # Verifies that a sample lying exactly ON the query radius is included,
    # and excluded once the radius shrinks by machine epsilon — for both the
    # exact model and an LSHForest configured to be exhaustive.
    X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
    n_points = len(X)

    # Build an exact nearest neighbors model as reference model to ensure
    # consistency between exact and approximate methods
    nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)

    # Build a LSHForest model with hyperparameter values that always guarantee
    # exact results on this toy dataset.
    lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)

    # define a query aligned with the first axis
    query = [[1., 0.]]

    # Compute the exact cosine distances of the query to the four points of
    # the dataset
    dists = pairwise_distances(query, X, metric='cosine').ravel()

    # The first point is almost aligned with the query (very small angle),
    # the cosine distance should therefore be almost null:
    assert_almost_equal(dists[0], 0, decimal=5)

    # The second point form an angle of 45 degrees to the query vector
    assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))

    # The third point is orthogonal from the query vector hence at a distance
    # exactly one:
    assert_almost_equal(dists[2], 1)

    # The last point is almost colinear but with opposite sign to the query
    # therefore it has a cosine 'distance' very close to the maximum possible
    # value of 2.
    assert_almost_equal(dists[3], 2, decimal=5)

    # If we query with a radius of one, all the samples except the last sample
    # should be included in the results. This means that the third sample
    # is lying on the boundary of the radius query:
    exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
    approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)

    assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
    assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
    assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
    assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])

    # If we perform the same query with a slightly lower radius, the third
    # point of the dataset that lay on the boundary of the previous query
    # is now rejected:
    eps = np.finfo(np.float64).eps
    exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
    approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)

    assert_array_equal(np.sort(exact_idx[0]), [0, 1])
    assert_array_equal(np.sort(approx_idx[0]), [0, 1])
    assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
    assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
    """kneighbors must return neighbors ordered closest-to-farthest."""
    sample_count, feature_count, trial_count = 12, 2, 10
    rng = np.random.RandomState(42)
    points = rng.rand(sample_count, feature_count)

    forest = LSHForest()
    ignore_warnings(forest.fit)(points)

    for _ in range(trial_count):
        # Random neighbor count and a random training point as the query
        # (drawn in this exact order to keep the RNG stream stable).
        k = rng.randint(0, sample_count)
        probe = points[rng.randint(0, sample_count)].reshape(1, -1)

        dists, _neighbors = forest.kneighbors(probe,
                                              n_neighbors=k,
                                              return_distance=True)

        # Distances must be non-decreasing, i.e. sorted ascending.
        assert_true(np.all(np.diff(dists[0]) >= 0))

    # Note: the radius_neighbors method does not guarantee the order of
    # the results.
def test_fit():
    """`fit` must populate every learned attribute with consistent sizes."""
    sample_count, feature_count, tree_count = 12, 2, 5
    rng = np.random.RandomState(42)
    points = rng.rand(sample_count, feature_count)

    forest = LSHForest(n_estimators=tree_count)
    ignore_warnings(forest.fit)(points)

    # The training data is retained verbatim.
    assert_array_equal(points, forest._fit_X)
    # One hash function g(p) per tree, each with hash length 32.
    assert_equal(tree_count, len(forest.hash_functions_))
    assert_equal(32, forest.hash_functions_[0].components_.shape[0])
    # One sorted-hash tree per estimator, covering every training point.
    assert_equal(tree_count, len(forest.trees_))
    assert_equal(sample_count, len(forest.trees_[0]))
    # Original-index tables mirror the trees' layout.
    assert_equal(tree_count, len(forest.original_indices_))
    assert_equal(sample_count, len(forest.original_indices_[0]))
def test_partial_fit():
    # Checks whether inserting array is consistent with fitted data.
    # `partial_fit` method should set all attribute values correctly.
    n_samples = 12
    n_samples_partial_fit = 3
    n_features = 2
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)
    X_partial_fit = rng.rand(n_samples_partial_fit, n_features)

    lshf = LSHForest()

    # Test unfitted estimator
    # (partial_fit on an unfitted estimator behaves like fit)
    ignore_warnings(lshf.partial_fit)(X)
    assert_array_equal(X, lshf._fit_X)

    ignore_warnings(lshf.fit)(X)

    # Insert wrong dimension
    # (feature count mismatch must raise ValueError)
    assert_raises(ValueError, lshf.partial_fit,
                  np.random.randn(n_samples_partial_fit, n_features - 1))

    ignore_warnings(lshf.partial_fit)(X_partial_fit)

    # size of _input_array = samples + 1 after insertion
    assert_equal(lshf._fit_X.shape[0],
                 n_samples + n_samples_partial_fit)
    # size of original_indices_[1] = samples + 1
    assert_equal(len(lshf.original_indices_[0]),
                 n_samples + n_samples_partial_fit)
    # size of trees_[1] = samples + 1
    assert_equal(len(lshf.trees_[1]),
                 n_samples + n_samples_partial_fit)
def test_hash_functions():
    # Checks randomness of hash functions.
    # Variance and mean of each hash function (projection vector)
    # should be different from flattened array of hash functions.
    # If hash functions are not randomly built (seeded with
    # same value), variances and means of all functions are equal.
    n_samples = 12
    n_features = 2
    n_estimators = 5
    rng = np.random.RandomState(42)
    X = rng.rand(n_samples, n_features)

    lshf = LSHForest(n_estimators=n_estimators,
                     random_state=rng.randint(0, np.iinfo(np.int32).max))
    ignore_warnings(lshf.fit)(X)

    # Collect every tree's projection matrix for the aggregate statistics.
    hash_functions = []
    for i in range(n_estimators):
        hash_functions.append(lshf.hash_functions_[i].components_)

    # Each individual tree's statistics must differ from the pooled ones.
    for i in range(n_estimators):
        assert_not_equal(np.var(hash_functions),
                         np.var(lshf.hash_functions_[i].components_))

    for i in range(n_estimators):
        assert_not_equal(np.mean(hash_functions),
                         np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
    # Checks whether candidates are sufficient.
    # This should handle the cases when number of candidates is 0.
    # User should be warned when number of candidates is less than
    # requested number of neighbors.
    # (The warning message strings below must match the library's exact
    # wording — do not edit them.)
    X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
                        [6, 10, 2]], dtype=np.float32)
    X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)

    # For zero candidates
    # (min_hash_match=32 == full hash length, so no candidate can match)
    lshf = LSHForest(min_hash_match=32)
    ignore_warnings(lshf.fit)(X_train)

    message = ("Number of candidates is not sufficient to retrieve"
               " %i neighbors with"
               " min_hash_match = %i. Candidates are filled up"
               " uniformly from unselected"
               " indices." % (3, 32))
    assert_warns_message(UserWarning, message, lshf.kneighbors,
                         X_test, n_neighbors=3)
    distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
    assert_equal(distances.shape[1], 3)

    # For candidates less than n_neighbors
    lshf = LSHForest(min_hash_match=31)
    ignore_warnings(lshf.fit)(X_train)

    message = ("Number of candidates is not sufficient to retrieve"
               " %i neighbors with"
               " min_hash_match = %i. Candidates are filled up"
               " uniformly from unselected"
               " indices." % (5, 31))
    assert_warns_message(UserWarning, message, lshf.kneighbors,
                         X_test, n_neighbors=5)
    distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
    assert_equal(distances.shape[1], 5)
def test_graphs():
    """Smoke test: neighbor-graph builders return square n-by-n matrices."""
    feature_count = 3
    rng = np.random.RandomState(42)

    for size in [5, 10, 20]:
        points = rng.rand(size, feature_count)
        forest = LSHForest(min_hash_match=0)
        ignore_warnings(forest.fit)(points)

        knn_graph = forest.kneighbors_graph(points)
        radius_graph = forest.radius_neighbors_graph(points)

        # Both graphs are adjacency matrices over the fitted samples.
        assert_equal(knn_graph.shape[0], size)
        assert_equal(knn_graph.shape[1], size)
        assert_equal(radius_graph.shape[0], size)
        assert_equal(radius_graph.shape[1], size)
def test_sparse_input():
    # Sparse and dense inputs (same random_state) must give identical
    # kneighbors and radius_neighbors results.
    # note: Fixed random state in sp.rand is not supported in older scipy.
    # The test should succeed regardless.
    X1 = sp.rand(50, 100)
    X2 = sp.rand(10, 100)
    forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
    forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)  # .A = dense ndarray

    d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
    d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)

    assert_almost_equal(d_sparse, d_dense)
    assert_almost_equal(i_sparse, i_dense)

    d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
                                                        return_distance=True)
    d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
                                                     return_distance=True)
    # radius_neighbors returns object arrays of per-query ragged results,
    # so compare entry by entry.
    assert_equal(d_sparse.shape, d_dense.shape)
    for a, b in zip(d_sparse, d_dense):
        assert_almost_equal(a, b)
    for a, b in zip(i_sparse, i_dense):
        assert_almost_equal(a, b)
|
bsd-3-clause
|
jostep/tensorflow
|
tensorflow/contrib/timeseries/examples/lstm.py
|
17
|
9460
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A more advanced example, of building an RNN-based time series model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
  """A time series model-building example using an RNNCell."""

  def __init__(self, num_units, num_features, dtype=tf.float32):
    """Initialize/configure the model object.

    Note that we do not start graph building here. Rather, this object is a
    configurable factory for TensorFlow graphs which are run by an Estimator.

    Args:
      num_units: The number of units in the model's LSTMCell.
      num_features: The dimensionality of the time series (features per
        timestep).
      dtype: The floating point data type to use.
    """
    super(_LSTMModel, self).__init__(
        # Pre-register the metrics we'll be outputting (just a mean here).
        train_output_names=["mean"],
        predict_output_names=["mean"],
        num_features=num_features,
        dtype=dtype)
    self._num_units = num_units
    # Filled in by initialize_graph()
    self._lstm_cell = None
    self._lstm_cell_run = None
    self._predict_from_lstm_output = None

  def initialize_graph(self, input_statistics):
    """Save templates for components, which can then be used repeatedly.

    This method is called every time a new graph is created. It's safe to start
    adding ops to the current default graph here, but the graph should be
    constructed from scratch.

    Args:
      input_statistics: A math_utils.InputStatistics object.
    """
    super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
    self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
    # Create templates so we don't have to worry about variable reuse.
    self._lstm_cell_run = tf.make_template(
        name_="lstm_cell",
        func_=self._lstm_cell,
        create_scope_now_=True)
    # Transforms LSTM output into mean predictions.
    self._predict_from_lstm_output = tf.make_template(
        name_="predict_from_lstm_output",
        func_=
        lambda inputs: tf.layers.dense(inputs=inputs, units=self.num_features),
        create_scope_now_=True)

  def get_start_state(self):
    """Return initial state for the time series model."""
    return (
        # Keeps track of the time associated with this state for error checking.
        tf.zeros([], dtype=tf.int64),
        # The previous observation or prediction.
        tf.zeros([self.num_features], dtype=self.dtype),
        # The state of the RNNCell (batch dimension removed since this parent
        # class will broadcast).
        [tf.squeeze(state_element, axis=0)
         for state_element
         in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])

  def _transform(self, data):
    """Normalize data based on input statistics to encourage stable training."""
    mean, variance = self._input_statistics.overall_feature_moments
    # NOTE(review): divides by the variance rather than the standard
    # deviation; _de_transform mirrors this, so the round trip is exact.
    return (data - mean) / variance

  def _de_transform(self, data):
    """Transform data back to the input scale."""
    mean, variance = self._input_statistics.overall_feature_moments
    return data * variance + mean

  def _filtering_step(self, current_times, current_values, state, predictions):
    """Update model state based on observations.

    Note that we don't do much here aside from computing a loss. In this case
    it's easier to update the RNN state in _prediction_step, since that covers
    running the RNN both on observations (from this method) and our own
    predictions. This distinction can be important for probabilistic models,
    where repeatedly predicting without filtering should lead to low-confidence
    predictions.

    Args:
      current_times: A [batch size] integer Tensor.
      current_values: A [batch size, self.num_features] floating point Tensor
        with new observations.
      state: The model's state tuple.
      predictions: The output of the previous `_prediction_step`.
    Returns:
      A tuple of new state and a predictions dictionary updated to include a
      loss (note that we could also return other measures of goodness of fit,
      although only "loss" will be optimized).
    """
    state_from_time, prediction, lstm_state = state
    # Guard against filtering at a timestamp other than the one the state
    # was advanced to.
    with tf.control_dependencies(
        [tf.assert_equal(current_times, state_from_time)]):
      transformed_values = self._transform(current_values)
      # Use mean squared error across features for the loss.
      predictions["loss"] = tf.reduce_mean(
          (prediction - transformed_values) ** 2, axis=-1)
      # Keep track of the new observation in model state. It won't be run
      # through the LSTM until the next _imputation_step.
      new_state_tuple = (current_times, transformed_values, lstm_state)
    return (new_state_tuple, predictions)

  def _prediction_step(self, current_times, state):
    """Advance the RNN state using a previous observation or prediction."""
    _, previous_observation_or_prediction, lstm_state = state
    lstm_output, new_lstm_state = self._lstm_cell_run(
        inputs=previous_observation_or_prediction, state=lstm_state)
    next_prediction = self._predict_from_lstm_output(lstm_output)
    new_state_tuple = (current_times, next_prediction, new_lstm_state)
    # Predictions are reported in the original (de-normalized) scale.
    return new_state_tuple, {"mean": self._de_transform(next_prediction)}

  def _imputation_step(self, current_times, state):
    """Advance model state across a gap."""
    # Does not do anything special if we're jumping across a gap. More advanced
    # models, especially probabilistic ones, would want a special case that
    # depends on the gap size.
    return state

  def _exogenous_input_step(
      self, current_times, current_exogenous_regressors, state):
    """Update model state based on exogenous regressors."""
    raise NotImplementedError(
        "Exogenous inputs are not implemented for this example.")
def train_and_predict(csv_file_name=_DATA_FILE, training_steps=200):
  """Train and predict using a custom time series model.

  Args:
    csv_file_name: Path to a CSV with one time column and five value columns.
    training_steps: Number of estimator training steps to run.
  Returns:
    Tuple of (times, observed, all_times, predicted_mean) as numpy arrays,
    where `all_times`/`predicted_mean` cover both the in-sample window and a
    100-step continuation forecast.
  """
  # Construct an Estimator from our LSTM model.
  estimator = ts_estimators.TimeSeriesRegressor(
      model=_LSTMModel(num_features=5, num_units=128),
      optimizer=tf.train.AdamOptimizer(0.001))
  # CSV layout: a single TIMES column followed by five VALUES columns.
  reader = tf.contrib.timeseries.CSVReader(
      csv_file_name,
      column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
                    + (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
  train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
      reader, batch_size=4, window_size=32)
  estimator.train(input_fn=train_input_fn, steps=training_steps)
  # Evaluate over the whole dataset to obtain fitted means and final state.
  evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
  evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  # Predict starting after the evaluation
  (predictions,) = tuple(estimator.predict(
      input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
          evaluation, steps=100)))
  times = evaluation["times"][0]
  observed = evaluation["observed"][0, :, :]
  # Concatenate in-sample fitted means with the 100-step forecast.
  predicted_mean = numpy.squeeze(numpy.concatenate(
      [evaluation["mean"][0], predictions["mean"]], axis=0))
  all_times = numpy.concatenate([times, predictions["times"]], axis=0)
  return times, observed, all_times, predicted_mean
def main(unused_argv):
  """Entry point: train the model, forecast, and plot observed vs. predicted."""
  if not HAS_MATPLOTLIB:
    raise ImportError(
        "Please install matplotlib to generate a plot from this example.")
  (observed_times, observations,
   all_times, predictions) = train_and_predict()
  # Dotted vertical line marks the boundary between history and forecast.
  pyplot.axvline(99, linestyle="dotted")
  observed_lines = pyplot.plot(
      observed_times, observations, label="Observed", color="k")
  predicted_lines = pyplot.plot(
      all_times, predictions, label="Predicted", color="b")
  # One legend handle per series family (plot() returns a line per feature).
  pyplot.legend(handles=[observed_lines[0], predicted_lines[0]],
                loc="upper left")
  pyplot.show()
if __name__ == "__main__":
  # tf.app.run parses command-line flags and then invokes main().
  tf.app.run(main=main)
|
apache-2.0
|
fspaolo/scikit-learn
|
sklearn/pipeline.py
|
3
|
13218
|
"""
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Licence: BSD
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
# One round of beers on me if someone finds out why the backslash
# is needed in the Attributes section so as not to upset sphinx.
class Pipeline(BaseEstimator):
    """Pipeline of transforms with a final estimator.

    Sequentially apply a list of transforms and a final estimator.
    Intermediate steps of the pipeline must be 'transforms', that is, they
    must implement fit and transform methods.
    The final estimator only needs to implement fit.

    The purpose of the pipeline is to assemble several steps that can be
    cross-validated together while setting different parameters.
    For this, it enables setting parameters of the various steps using their
    names and the parameter name separated by a '__', as in the example below.

    Parameters
    ----------
    steps: list
        List of (name, transform) tuples (implementing fit/transform) that are
        chained, in the order in which they are chained, with the last object
        an estimator.

    Examples
    --------
    >>> from sklearn import svm
    >>> from sklearn.datasets import samples_generator
    >>> from sklearn.feature_selection import SelectKBest
    >>> from sklearn.feature_selection import f_regression
    >>> from sklearn.pipeline import Pipeline
    >>> # generate some data to play with
    >>> X, y = samples_generator.make_classification(
    ...     n_informative=5, n_redundant=0, random_state=42)
    >>> # ANOVA SVM-C
    >>> anova_filter = SelectKBest(f_regression, k=5)
    >>> clf = svm.SVC(kernel='linear')
    >>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
    >>> # You can set the parameters using the names issued
    >>> # For instance, fit using a k of 10 in the SelectKBest
    >>> # and a parameter 'C' of the svm
    >>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
    ... # doctest: +ELLIPSIS
    Pipeline(steps=[...])
    >>> prediction = anova_svm.predict(X)
    >>> anova_svm.score(X, y)
    0.75
    """

    # BaseEstimator interface

    def __init__(self, steps):
        self.named_steps = dict(steps)
        names, estimators = zip(*steps)
        if len(self.named_steps) != len(steps):
            # BUG FIX: `"%s" % names` raises TypeError when `names` is a
            # tuple with more than one element; wrap it in a 1-tuple.
            raise ValueError("Names provided are not unique: %s" % (names,))

        # shallow copy of steps
        self.steps = tosequence(zip(names, estimators))
        transforms = estimators[:-1]
        estimator = estimators[-1]

        # Validate eagerly so a misconfigured pipeline fails at construction
        # time rather than deep inside fit().
        for t in transforms:
            if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
                    hasattr(t, "transform")):
                # BUG FIX: previous message was garbled ("steps a the chain").
                raise TypeError("All intermediate steps of the chain should "
                                "be transforms and implement fit and "
                                "transform ('%s' (type %s) doesn't)"
                                % (t, type(t)))

        if not hasattr(estimator, "fit"):
            raise TypeError("Last step of chain should implement fit "
                            "('%s' (type %s) doesn't)"
                            % (estimator, type(estimator)))

    def get_params(self, deep=True):
        """Return estimator parameters; with deep=True also expose each
        step's parameters under the '<step>__<param>' naming convention."""
        if not deep:
            return super(Pipeline, self).get_params(deep=False)
        else:
            out = self.named_steps.copy()
            for name, step in six.iteritems(self.named_steps):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out

    # Estimator interface

    def _pre_transform(self, X, y=None, **fit_params):
        """Fit/transform all but the last step; return the transformed data
        and the fit parameters destined for the final estimator."""
        # Route '<step>__<param>' fit parameters to their owning step.
        fit_params_steps = dict((step, {}) for step, _ in self.steps)
        for pname, pval in six.iteritems(fit_params):
            step, param = pname.split('__', 1)
            fit_params_steps[step][param] = pval
        Xt = X
        for name, transform in self.steps[:-1]:
            # Prefer fit_transform when available (often cheaper than
            # fit followed by transform).
            if hasattr(transform, "fit_transform"):
                Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
            else:
                Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
                              .transform(Xt)
        return Xt, fit_params_steps[self.steps[-1][0]]

    def fit(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then fit the transformed data using the final estimator.
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        self.steps[-1][-1].fit(Xt, y, **fit_params)
        return self

    def fit_transform(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then use fit_transform on transformed data using the final
        estimator."""
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        if hasattr(self.steps[-1][-1], 'fit_transform'):
            return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
        else:
            return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)

    def predict(self, X):
        """Applies transforms to the data, and the predict method of the
        final estimator. Valid only if the final estimator implements
        predict."""
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict(Xt)

    def predict_proba(self, X):
        """Applies transforms to the data, and the predict_proba method of the
        final estimator. Valid only if the final estimator implements
        predict_proba."""
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_proba(Xt)

    def decision_function(self, X):
        """Applies transforms to the data, and the decision_function method of
        the final estimator. Valid only if the final estimator implements
        decision_function."""
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].decision_function(Xt)

    def predict_log_proba(self, X):
        """Applies transforms to the data, and the predict_log_proba method of
        the final estimator. Valid only if the final estimator implements
        predict_log_proba."""
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_log_proba(Xt)

    def transform(self, X):
        """Applies transforms to the data, and the transform method of the
        final estimator. Valid only if the final estimator implements
        transform."""
        Xt = X
        for name, transform in self.steps:
            Xt = transform.transform(Xt)
        return Xt

    def inverse_transform(self, X):
        """Apply each step's inverse_transform in reverse order."""
        if X.ndim == 1:
            # Promote a single sample to a (1, n_features) row.
            X = X[None, :]
        Xt = X
        for name, step in self.steps[::-1]:
            Xt = step.inverse_transform(Xt)
        return Xt

    def score(self, X, y=None):
        """Applies transforms to the data, and the score method of the
        final estimator. Valid only if the final estimator implements
        score."""
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].score(Xt, y)

    @property
    def _pairwise(self):
        # check if first estimator expects pairwise input
        return getattr(self.steps[0][1], '_pairwise', False)
def _fit_one_transformer(transformer, X, y):
    # joblib.Parallel helper: fit a single transformer in place.
    # NOTE(review): the fitted transformer is not returned, so with a
    # process-based joblib backend the fitted state would be lost in the
    # child process -- confirm this is only used with shared-memory workers.
    transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
    """Transform X with one transformer, scaling by its weight when given."""
    Xt = transformer.transform(X)
    if transformer_weights is not None and name in transformer_weights:
        # Apply the per-transformer multiplicative weight.
        Xt = Xt * transformer_weights[name]
    return Xt
def _fit_transform_one(transformer, name, X, y, transformer_weights,
                       **fit_params):
    """Fit one transformer on (X, y) and return its (optionally weighted)
    transformed output."""
    if hasattr(transformer, 'fit_transform'):
        result = transformer.fit_transform(X, y, **fit_params)
    else:
        result = transformer.fit(X, y, **fit_params).transform(X)
    if transformer_weights is not None and name in transformer_weights:
        # Apply the per-transformer multiplicative weight.
        result = result * transformer_weights[name]
    return result
class FeatureUnion(BaseEstimator, TransformerMixin):
    """Concatenates results of multiple transformer objects.

    This estimator applies a list of transformer objects in parallel to the
    input data, then concatenates the results. This is useful to combine
    several feature extraction mechanisms into a single transformer.

    Parameters
    ----------
    transformer_list: list of (string, transformer) tuples
        List of transformer objects to be applied to the data. The first
        half of each tuple is the name of the transformer.
    n_jobs: int, optional
        Number of jobs to run in parallel (default 1).
    transformer_weights: dict, optional
        Multiplicative weights for features per transformer.
        Keys are transformer names, values the weights.
    """
    def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
        self.transformer_list = transformer_list
        self.n_jobs = n_jobs
        self.transformer_weights = transformer_weights

    def get_feature_names(self):
        """Get feature names from all transformers.

        Returns
        -------
        feature_names : list of strings
            Names of the features produced by transform.
        """
        feature_names = []
        for name, trans in self.transformer_list:
            if not hasattr(trans, 'get_feature_names'):
                raise AttributeError("Transformer %s does not provide"
                                     " get_feature_names." % str(name))
            # Prefix each feature with its transformer's name so that names
            # stay unique across transformers.
            feature_names.extend([name + "__" + f for f in
                                  trans.get_feature_names()])
        return feature_names

    def fit(self, X, y=None):
        """Fit all transformers using X.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data, used to fit transformers.
        """
        # Each transformer is fitted in place by the helper; nothing is
        # collected from the Parallel call.
        Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_one_transformer)(trans, X, y)
            for name, trans in self.transformer_list)
        return self

    def fit_transform(self, X, y=None, **fit_params):
        """Fit all transformers using X, transform the data and concatenate
        results.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.

        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        Xs = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_transform_one)(trans, name, X, y,
                                        self.transformer_weights, **fit_params)
            for name, trans in self.transformer_list)
        # Sparse output from any transformer forces a sparse concatenation.
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs

    def transform(self, X):
        """Transform X separately by each transformer, concatenate results.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.

        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        Xs = Parallel(n_jobs=self.n_jobs)(
            delayed(_transform_one)(trans, name, X, self.transformer_weights)
            for name, trans in self.transformer_list)
        # Sparse output from any transformer forces a sparse concatenation.
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs

    def get_params(self, deep=True):
        """Return parameters; with deep=True also expose each transformer's
        parameters under the '<name>__<param>' naming convention."""
        if not deep:
            return super(FeatureUnion, self).get_params(deep=False)
        else:
            out = dict(self.transformer_list)
            for name, trans in self.transformer_list:
                for key, value in iteritems(trans.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out
|
bsd-3-clause
|
rsignell-usgs/notebook
|
Cartopy/4-panel-plot2.py
|
1
|
12662
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # four_panel.py
# ## By: Kevin Goebbert
#
# Date: 5 October 2014
#
# This script uses 1 deg GFS model output via nomads URL to plot the 192-h
# forecast in a four panel plot.
#
# - UL Panel - MSLP, 1000-500 hPa Thickness, Precip (in)
# - UR Panel - 850-hPa Heights and Temperature ($^{\circ}$C)
# - LL Panel - 500-hPa Heights and Absolute Vorticity ($\times 10^{-5}$ s$^{-1}$)
# - LR Panel - 300-hPa Heights and Wind Speed (kts)
#
# Created using the Anaconda Distribution of Python 2.7
#
# Install Anaconda from http://continuum.io/downloads
#
# Then install binstar to use the conda package management software
#
# conda install binstar
#
# Then use conda to install two packages not a part of the main distribution.
#
# conda install basemap
# conda install netcdf4
# <markdowncell>
# Original: http://nbviewer.ipython.org/gist/rsignell-usgs/269d0e3d7c8b64eaa261
# <codecell>
import numpy as np
import numpy.ma as ma
from matplotlib.colors import Normalize
class MidpointNormalize(Normalize):
    """Colormap normalization with an adjustable midpoint (for 850-hPa temps)."""

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        """Piecewise-linearly map [vmin, midpoint, vmax] onto [0, 0.5, 1].

        Ignores masked values and all kinds of edge cases to keep the
        example simple.
        """
        anchors = [self.vmin, self.midpoint, self.vmax]
        positions = [0, 0.50, 1]
        return ma.masked_array(np.interp(value, anchors, positions))
# <markdowncell>
# Choose a model - right now this program is based on the 0.5 deg GFS output,
# there shouldn't be much to change for other GFS resolution sets, mainly smoothing
#
# To modify for another model you would need to know variable names and determine
# what array elements are associated with your view window and vertical levels.
# <codecell>
from datetime import datetime
# Which GFS product to pull from the nomads server (0.5 deg by default).
model = 'gfs_0p50'
# model = 'gfs_2p50'
# model = 'gfs_1p00'
# model = 'gfs_0p25'
# Get the current hour from the computer and choose the proper model
# initialization time.
# These times are set based on when they appear on the nomads server.
currhr = int(datetime.utcnow().strftime('%H'))
# Each init cycle (00/06/12/18Z) becomes available a few hours after init.
if currhr >= 4 and currhr < 11:
    run = '00'
elif currhr >= 11 and currhr < 17:
    run = '06'
elif currhr >= 17 and currhr < 23:
    run = '12'
else:
    run = '18'
# <codecell>
import time
from time import mktime
# BUG FIX: `timedelta` is used below but was never imported (only
# `from datetime import datetime` appears earlier), which raised a
# NameError whenever the previous-day branch was taken.
from datetime import timedelta

# Setting some time and date strings for use in constructing the filename and for output.
# fdate looks like ... 20141005 ... and is used in constructing the filename
# date looks like ... 2014-10-05 00:00:00 and is used for output
if (run == '18') and (currhr < 3):  # Because we need to go back to the previous day.
    date1 = (datetime.utcnow() - timedelta(hours=24)).strftime('%Y%m%d') + run
    fdate = (datetime.utcnow() - timedelta(hours=24)).strftime('%Y%m%d')
else:
    date1 = datetime.utcnow().strftime('%Y%m%d') + run
    fdate = datetime.utcnow().strftime('%Y%m%d')
# NOTE(review): for currhr == 3 the run is '18' but no day rollback happens,
# yielding a timestamp in the future -- confirm the intended cutoff hour.
date = time.strptime(date1, "%Y%m%d%H")
date = datetime.fromtimestamp(mktime(date))
# If you wanted to run a past date (I think they keep a week or two on the website), then
# just uncomment the following three lines to not use the current model run.
# date = '20141005 00:00:00'
# fdate = '20141005'
# run = '00'
print(date)
# <codecell>
uri = "http://nomads.ncep.noaa.gov:9090/dods/{}/{}{}/{}_{}z".format
url = uri(model, model[0:3], fdate, model, run)
url
# <codecell>
# Using iris to get the data.
import iris
cubes = iris.load(url)
print(cubes)
# <markdowncell>
# ### Pulling in only the data we need
# <codecell>
# Awfully long lon_names and no standard names...
for cube in cubes:
if cube.standard_name:
print(cube.standard_name)
# <codecell>
from iris import Constraint
# ... because of that this part is awkward.
def make_constraint(var_name="hgtprs"):
    """Return an iris Constraint that selects cubes by their var_name."""
    def _matches(cube):
        return cube.var_name == var_name
    return Constraint(cube_func=_matches)
hgtprs = cubes.extract_strict(make_constraint("hgtprs"))
print(hgtprs)
# <codecell>
times = hgtprs.coord(axis='T')
resolution = times.attributes['resolution']
print("The time resolution is {} days".format(resolution))
time_step = 24 * resolution
print("The time step between forecast hours is {} hours".format(time_step))
# <codecell>
# But this one makes it worth using iris.
# Set US Bounds for GFS 1 deg data.
lon = Constraint(longitude=lambda x: 220 <= x <= 310)
lat = Constraint(latitude=lambda y: 20 <= y <= 70)
z300 = Constraint(altitude=lambda z: z == 300)
z500 = Constraint(altitude=lambda z: z == 500)
z850 = Constraint(altitude=lambda z: z == 850)
z1000 = Constraint(altitude=lambda z: z == 1000)
hght850 = hgtprs.extract(lon & lat & z850)
# or use the cubes directly.
hght500 = cubes.extract_strict(make_constraint("hgtprs") & lon & lat & z500)
hght1000 = cubes.extract_strict(make_constraint("hgtprs") & lon & lat & z1000)
temp850 = cubes.extract_strict(make_constraint("tmpprs") & lon & lat & z850)
avor500 = cubes.extract_strict(make_constraint("absvprs") & lon & lat & z500)
hght300 = cubes.extract_strict(make_constraint("hgtprs") & lon & lat & z300)
uwnd_300 = cubes.extract_strict(make_constraint("ugrdprs") & lon & lat & z300)
vwnd_300 = cubes.extract_strict(make_constraint("vgrdprs") & lon & lat & z300)
mslp = cubes.extract_strict(make_constraint("prmslmsl") & lon & lat)
precip = cubes.extract_strict(make_constraint("apcpsfc") & lon & lat)
# <codecell>
# Specify units since Nomads doesn't provide!
# We can then use Iris to convert units later
temp850.units='Kelvin'
uwnd_300.units='m/s'
vwnd_300.units='m/s'
precip.units = 'mm'
# <markdowncell>
# ### For a few variables we want to smooth the data to remove non-synoptic scale wiggles
# <codecell>
import scipy.ndimage as ndimage
kw = dict(sigma=1.5, order=0)
Z_1000 = ndimage.gaussian_filter(hght1000.data, **kw)
Z_850 = ndimage.gaussian_filter(hght850.data, **kw)
Z_500 = ndimage.gaussian_filter(hght500.data, **kw)
Z_300 = ndimage.gaussian_filter(hght300.data, **kw)
pmsl = ndimage.gaussian_filter(mslp.data/100., **kw)
# <markdowncell>
# # Convert data to common formats
# <codecell>
temp850.convert_units(iris.unit.Unit('Celsius'))
tmpc850 = temp850.data
avor500.data *= 1e5 # preserve cube info here
uwnd_300.convert_units(iris.unit.Unit('knots'))
vwnd_300.convert_units(iris.unit.Unit('knots'))
wspd300 = np.sqrt(uwnd_300.data**2 + vwnd_300.data**2)
precip.convert_units(iris.unit.Unit('inch'))
pcpin = precip.data
# <codecell>
# Set number of forecast hours from hght850 data
fhours = times.points.size
print("Number of steps in the loop for all forecast hours in the dataset = {}".format(fhours))
# <codecell>
# Set countour intervals for various parameters.
countours = dict(clevpmsl=range(900, 1100, 4),
clevtmpf2m=range(0, 120, 2),
clev850=range(1200, 1800, 30),
clevrh700=[70, 80, 90],
clevtmpc850=range(-30, 40, 2),
clev500=range(5100, 6000, 60),
clevavor500=range(-4, 1) + range(7, 49, 3),
clev300=range(8160, 10080, 120),
clevsped300=range(50, 230, 20),
clevprecip=[0, 0.01, 0.03, 0.05,
0.10, 0.15, 0.20, 0.25,
0.30, 0.40, 0.50, 0.60,
0.70, 0.80, 0.90, 1.00,
1.25, 1.50, 1.75, 2.00, 2.50])
# Colors for Vorticity.
colorsavor500 = ('#660066', '#660099', '#6600CC', '#6600FF',
'w', '#ffE800', '#ffD800', '#ffC800', '#ffB800',
'#ffA800', '#ff9800', '#ff8800', '#ff7800', '#ff6800',
'#ff5800', '#ff5000', '#ff4000', '#ff3000')
# <codecell>
from oceans import wrap_lon180
# Subset the lats and lons from the model according to view desired.
clats1 = hght300.coord(axis='Y').points
clons1 = wrap_lon180(hght300.coord(axis='X').points)
# Make a grid of lat/lon values to use for plotting with Basemap.
clats, clons = np.meshgrid(clons1, clats1)
# <codecell>
%matplotlib inline
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from cartopy.mpl import geoaxes
import cartopy.feature as cfeature
from mpl_toolkits.basemap import cm
def make_map(ax):
    """Draw the common base map (CONUS extent, coastlines, borders) on `ax`."""
    kw = dict(edgecolor='gray', linewidth=0.5)
    ax.set_extent([-135, -55, 20, 60])
    ax.coastlines(**kw)
    # NOTE(review): relies on the module-level `states_provinces` feature,
    # which is defined *after* this function in the script. That is fine at
    # call time, but confirm ordering before refactoring.
    ax.add_feature(states_provinces, **kw)
    ax.add_feature(cfeature.BORDERS, **kw)
states_provinces = cfeature.NaturalEarthFeature(
category='cultural',
name='admin_1_states_provinces_lines',
scale='50m',
facecolor='none')
# Options.
colorbar = dict(orientation='horizontal', extend='both', aspect=65, shrink=0.913, pad=0, extendrect='True')
clabel = dict(inline=1, inline_spacing=10, fmt='%i', rightside_up=True)
def update_figure(fh):
    """Redraw the four-panel chart for forecast-hour index `fh`.

    Used as the FuncAnimation callback; reads module-level globals
    (times, axes, fig, pcpin, tmpc850, avor500, wspd300, Z_* arrays,
    countours, colorbar, clabel, clats/clons grids).
    """
    date = times.units.num2date(times.points[fh])
    global ax, fig
    # Wipe all four map axes before redrawing this frame.
    [ax.cla() for ax in axes.ravel()]
    ax0, ax1, ax2, ax3 = axes.ravel()
    if fh == 0:
        # Clear colorbar when 0 is repeated.
        for a in fig.axes:
            if not isinstance(a, geoaxes.GeoAxes):
                fig.delaxes(a)
    # Upper-left panel MSLP, 1000-500 hPa Thickness, Precip (in).
    make_map(ax0)
    cmap = cm.s3pcpn_l
    # NOTE(review): this parity test looks like a de-accumulation of the GFS
    # precip buckets; at fh == 0 it computes pcpin[0] - pcpin[-1] (wraps to
    # the last step) -- confirm intended behavior for the first frame.
    if fh % 2 != 0:
        data = pcpin[fh, ...]
    else:
        data = pcpin[fh, ...] - pcpin[fh-1, ...]
    cf = ax0.contourf(clons1, clats1, data,
                      countours['clevprecip'], cmap=cmap)
    if fh == 0:  # Only draw colorbar once!
        cbar = fig.colorbar(cf, ax=ax0, **colorbar)
    # NOTE(review): despite the names, `clats` holds the longitude grid and
    # `clons` the latitude grid (see the meshgrid call earlier), so the
    # (clats, clons) argument order below is effectively (x, y).
    cs2 = ax0.contour(clats, clons, Z_500[fh, ...]-Z_1000[fh, ...], countours['clev500'], colors='r', linewidths=1.5,
                      linestyles='dashed')
    cs = ax0.contour(clats, clons, pmsl[fh, ...], countours['clevpmsl'], colors='k', linewidths=1.5)
    ax0.clabel(cs, fontsize=8, **clabel)
    ax0.clabel(cs2, fontsize=7, **clabel)
    ax0.set_title('MSLP (hPa), 2m TMPF, and Precip', loc='left')
    ax0.set_title('VALID: {}'.format(date), loc='right')
    # Upper-right panel 850-hPa Heights and Temp (C).
    make_map(ax1)
    cmap = plt.cm.jet
    # Center the temperature colormap on 10 C.
    norm = MidpointNormalize(midpoint=10)
    cf = ax1.contourf(clats, clons, tmpc850[fh, ...], countours['clevtmpc850'], cmap=cmap, norm=norm, extend='both')
    if fh == 0:  # Only draw colorbar once!
        cbar = fig.colorbar(cf, ax=ax1, **colorbar)
    cs = ax1.contour(clats, clons, Z_850[fh, ...], countours['clev850'], colors='k', linewidths=1.5)
    ax1.clabel(cs, fontsize=8, **clabel)
    ax1.set_title('850-hPa HGHTs (m) and TMPC', loc='left')
    ax1.set_title('VALID: {}'.format(date), loc='right')
    # Lower-left panel 500-hPa Heights and AVOR.
    make_map(ax2)
    cf = ax2.contourf(clats, clons, avor500[fh, ...].data, countours['clevavor500'], colors=colorsavor500, extend='both')
    if fh == 0:  # Only draw colorbar once!
        cbar = fig.colorbar(cf, ax=ax2, **colorbar)
    cs = ax2.contour(clats, clons, Z_500[fh, ...], countours['clev500'], colors='k', linewidths=1.5)
    ax2.clabel(cs, fontsize=8, **clabel)
    ax2.set_title(r'500-hPa HGHTs (m) and AVOR ($\times 10^5$ s$^{-1}$)', loc='left')
    ax2.set_title('VALID: {}'.format(date), loc='right')
    # Lower-right panel 300-hPa Heights and Wind Speed (kts).
    make_map(ax3)
    cmap = plt.cm.get_cmap("BuPu")
    cf = ax3.contourf(clats, clons, wspd300[fh, ...], countours['clevsped300'], cmap=cmap, extend='max')
    if fh == 0:  # Only draw colorbar once!
        cbar = fig.colorbar(cf, ax=ax3, **colorbar)
    cs = ax3.contour(clats, clons, Z_300[fh, ...], countours['clev300'], colors='k', linewidths=1.5)
    ax3.clabel(cs, fontsize=8, **clabel)
    ax3.set_title('300-hPa HGHTs (m) and SPED (kts)', loc='left')
    ax3.set_title('VALID: {}'.format(date), loc='right')
    # To make a nicer layout use plt.tight_layout()
    fig.tight_layout()
    # Add room at the top of the plot for a main title
    fig.subplots_adjust(top=0.90)
    fig.suptitle('{} GFS {}Z'.format(fdate, run), fontsize=20)
# <markdowncell>
# ### Loop over the forecast hours. Current setting is use all 192-h at the 1 deg resolution
# <codecell>
cd /usgs/data2/notebook/tmp
# <codecell>
from JSAnimation import IPython_display
from matplotlib.animation import FuncAnimation
kw = dict(nrows=2, ncols=2, sharex=True, sharey=True,)
fig, axes = plt.subplots(subplot_kw=dict(projection=ccrs.Miller(central_longitude=0)),
figsize=(17, 12), **kw)
FuncAnimation(fig, update_figure, interval=100, frames=times.points.size)
# <codecell>
|
mit
|
michael-pacheco/dota2-predictor
|
visualizing/hero_map.py
|
2
|
13950
|
import collections
import logging
import math
import random
import numpy as np
import pandas as pd
import plotly.plotly as py
import plotly.graph_objs as go
import tensorflow as tf
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from tools.metadata import get_hero_dict, get_last_patch
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
data_index = 0
def _build_vocabulary(words, vocabulary_size):
    """ Creates a dictionary representing a vocabulary and counts the appearances of each word

    In this context, each word represents a hero's index casted to string e.g. Anti-Mage -> "1"

    Args:
        words: list of strings representing the corpus
        vocabulary_size: number of words to be evaluated (the vocabulary will contain only this
            number of words, even if there are more unique words in the corpus)

    Returns:
        data: list of indices obtained by mapping the words' indices to the corpus
        count: list of [word, appearances] for the corpus
        dictionary: the vocabulary (the key is the word, and the value is the appearances)
        reverse_dictionary: the reversed vocabulary (the key are the appearances and the values
            are the words)
    """
    # Seed the count list with the unknown token, then append the most
    # frequent (vocabulary_size - 1) words from the corpus.
    count = [['UNK', -1]]
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))

    # Word -> index, in frequency order ('UNK' gets index 0).
    dictionary = {word: position for position, (word, _) in enumerate(count)}

    data = []
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            # Out-of-vocabulary word: map to the 'UNK' slot.
            index = 0
            unk_count = unk_count + 1
        data.append(index)
    count[0][1] = unk_count

    # Keep the reversed mapping for later lookups by index.
    reverse_dictionary = {position: word for word, position in dictionary.items()}
    return data, count, dictionary, reverse_dictionary
def _generate_batch(data, batch_size, num_skips, window_size):
    """ Generates a batch of data to be used in training using the skip-gram flavor of word2vec

    Args:
        data: list of indices obtained by mapping the words' indices to the corpus
        batch_size: number of samples to be used in a batch
        num_skips: number of skips hyperparameter of word2vec
        window_size: window size hyperparameter of word2vec

    Returns:
        batch: batch of data to be used in training
        labels: labels of each sample in the batch
    """
    # Module-level cursor into `data`; persists across calls so successive
    # batches walk through the corpus (wrapping at the end).
    global data_index
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    # Full context span: window_size words on each side of the center word.
    span = 2 * window_size + 1
    buffer = collections.deque(maxlen=span)
    # Prime the sliding window with the first `span` tokens.
    for _ in range(span):
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    for i in range(batch_size // num_skips):
        # target label at the center of the buffer
        target = window_size
        targets_to_avoid = [window_size]
        for j in range(num_skips):
            # Sample a context position not yet used for this center word.
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            # (center word, sampled context word) training pair.
            batch[i * num_skips + j] = buffer[window_size]
            labels[i * num_skips + j, 0] = buffer[target]
        # Slide the window one token forward for the next center word.
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    return batch, labels
def _train_word2vec(data,
                    batch_size,
                    vocabulary_size,
                    embedding_size,
                    neg_samples,
                    window_size,
                    num_steps,
                    reverse_dictionary,
                    heroes_dict):
    """ Given input data and hyperparameters, train the dataset of games using word2vec with
    skip-gram flavor

    Args:
        data: list of indices obtained by mapping the words' indices to the corpus
        batch_size: number of samples to be used in a batch
        vocabulary_size: number of words to be evaluated (the vocabulary will contain only this
            number of words, even if there are more unique words in the corpus)
        embedding_size: number of dimensions when creating word embeddings
        neg_samples: word2vec negative samples hyperparameter
        window_size: word2vec window size hyperparameter
        num_steps: number of steps to train for (at least 10k should be fine)
        reverse_dictionary: the reversed vocabulary (the key are the appearances and the values
            are the words)
        heroes_dict: dictionary that maps the hero's ID to its name

    Returns:
        final_embeddings: np.array of (samples, embedding_size) dimension corresponding to each
            hero's embeddings
    """
    # Random heroes used only for the periodic nearest-neighbour printout.
    valid_size = 15
    valid_examples = np.random.randint(0, vocabulary_size, valid_size)
    graph = tf.Graph()
    with graph.as_default(), tf.device('/cpu:0'):
        train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
        train_labels = tf.placeholder(tf.float32, shape=[batch_size, 1])
        valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
        embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        softmax_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size],
                                                          stddev=1.0 / math.sqrt(embedding_size)))
        softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
        embed = tf.nn.embedding_lookup(embeddings, train_dataset)
        # Sampled softmax approximates the full softmax over the vocabulary,
        # which keeps each step cheap for large vocabularies.
        loss = tf.reduce_mean(
            tf.nn.sampled_softmax_loss(softmax_weights,
                                       softmax_biases,
                                       train_labels,
                                       embed,
                                       neg_samples,
                                       vocabulary_size))
        optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
        # L2-normalize embeddings so cosine similarity reduces to a matmul.
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm
        valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
        similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
    with tf.Session(graph=graph) as session:
        session.run(tf.global_variables_initializer())
        logger.info('Initialized graph')
        average_loss = 0
        for step in range(num_steps):
            batch_data, batch_labels = _generate_batch(data,
                                                       batch_size,
                                                       2 * window_size,
                                                       window_size)
            feed_dict = {train_dataset: batch_data, train_labels: batch_labels}
            _, new_loss = session.run([optimizer, loss], feed_dict=feed_dict)
            average_loss += new_loss
            # print the loss every 2k steps
            if step % 2000 == 0:
                if step > 0:
                    average_loss = average_loss / 2000
                logger.info('Average loss at step %d: %f', step, average_loss)
                average_loss = 0
            # print a sample of similarities between heroes every 10k steps
            if step % 10000 == 0:
                sim = similarity.eval()
                for i in xrange(valid_size):
                    valid_word = reverse_dictionary[valid_examples[i]]
                    # ignore unknown and padding tokens
                    if valid_word != 'UNK' and valid_word != 'PAD':
                        valid_word = heroes_dict[int(reverse_dictionary[valid_examples[i]])]
                        top_k = 8  # number of nearest neighbors to print
                        nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                        log = 'Nearest to %s:' % valid_word
                        for k in xrange(top_k):
                            index = reverse_dictionary[nearest[k]]
                            if index != 'UNK' and index != 'PAD':
                                close_word = heroes_dict[int(index)]
                            else:
                                close_word = index
                            log = '%s %s,' % (log, close_word)
                        logger.info(log)
        # Evaluate once more to return the final normalized embeddings.
        final_embeddings = normalized_embeddings.eval()
    return final_embeddings
def _plot_similarities(embeddings, heroes_dict, reverse_dictionary, perplexity=20):
    """ Plot the obtained hero embeddings using TSNE algorithm in 2D space.
    There are 4 assumed roles: Mid, Carry, Offlaner, Support, each category containing a
    representative hardcoded hero in order to correctly identify each cluster's role.
    The resulting scatter plot is uploaded to plotly via `py.iplot`.
    Args:
        embeddings: hero embeddings obtained after training
        heroes_dict: dictionary that maps the hero's ID to its name
        reverse_dictionary: the reversed vocabulary (the key are the appearances and the values
            are the words)
        perplexity: hyperparameter of TSNE (15-30 seems to work best)
    """
    # Reduce the embeddings to 2D
    tsne = TSNE(n_components=2, perplexity=perplexity, random_state=42)
    two_d_embeddings = tsne.fit_transform(embeddings)
    # Apply KMeans on the data in order to clusterize by role
    kmeans = KMeans(n_clusters=4, n_jobs=-1)
    kmeans.fit(tsne.embedding_)
    labels = kmeans.labels_
    # drop the labels of the first two vocabulary rows: they are the special
    # 'UNK'/'PAD' tokens (their coordinates are deleted below as well)
    labels = labels[2:]
    x_vals = two_d_embeddings[:, 0]
    y_vals = two_d_embeddings[:, 1]
    number_of_heroes = len(heroes_dict.keys())
    names = number_of_heroes * ['']
    # hero names start at vocabulary index 2, after the two special tokens
    for i in range(number_of_heroes):
        names[i] = heroes_dict[int(reverse_dictionary[i + 2])]
    x_vals = list(x_vals)
    y_vals = list(y_vals)
    # delete 'UNK' and 'PAD' when plotting
    # (delete index 1 first so index 0 is still valid)
    del x_vals[1]
    del x_vals[0]
    del y_vals[1]
    del y_vals[0]
    traces = []
    # one scatter trace per KMeans cluster; a cluster is named after the role of
    # the hardcoded representative hero found in it, or stays 'Mixed'
    for cluster in range(max(labels) + 1):
        indices = []
        for i in range(len(labels)):
            if labels[i] == cluster:
                indices.append(i)
        cluster_text = 'Mixed'
        heroes_in_cluster = [names[i] for i in indices]
        if 'Terrorblade' in heroes_in_cluster:
            cluster_text = 'Carry'
        elif 'Shadow Fiend' in heroes_in_cluster:
            cluster_text = 'Mid'
        elif 'Batrider' in heroes_in_cluster:
            cluster_text = 'Offlane'
        elif 'Dazzle' in heroes_in_cluster:
            cluster_text = 'Support'
        trace = go.Scatter(
            x=[x_vals[i] for i in indices],
            y=[y_vals[i] for i in indices],
            mode='markers+text',
            text=[names[i] for i in indices],
            name=cluster_text,
            textposition='top'
        )
        traces.append(trace)
    # hide axes entirely: only the relative positions of the heroes matter
    layout = go.Layout(
        title='Hero map test',
        xaxis=dict(
            autorange=True,
            showgrid=False,
            zeroline=False,
            showline=False,
            autotick=True,
            ticks='',
            showticklabels=False
        ),
        yaxis=dict(
            autorange=True,
            showgrid=False,
            zeroline=False,
            showline=False,
            autotick=True,
            ticks='',
            showticklabels=False
        )
    )
    data = traces
    figure = go.Figure(data=data, layout=layout)
    py.iplot(figure, filename='heromap')
def plot_hero_map(csv_path,
                  batch_size=128,
                  embedding_size=25,
                  window_size=2,
                  neg_samples=64,
                  num_steps=30001,
                  low_mmr=0,
                  high_mmr=9000):
    """ Creates a 2D plot of the heroes based on their similarity obtained with word2vec. The result
    is uploaded to plotly.
    Args:
        csv_path: path to the training dataset csv
        batch_size: size of the batch to be used in training
        embedding_size: number of dimensions when creating word embeddings
        window_size: word2vec window size hyperparameter
        neg_samples: word2vec negative samples hyperparameter
        num_steps: number of steps to train for at (at least 10k should be fine)
        low_mmr: lower bound of the MMRs filtered for plotting
        high_mmr: upper bound of the MMRs filtered for plotting
    """
    patch = get_last_patch()
    heroes_dict = get_hero_dict()
    matches = pd.read_csv(csv_path)
    # keep only the games inside the requested MMR interval, as a numpy array
    matches = matches[(matches.avg_mmr > low_mmr) & (matches.avg_mmr < high_mmr)].values
    vocabulary_size = patch['heroes_released'] + 1
    # build the corpus: each team is a "sentence", teams separated by padding
    team_separator = ['PAD', 'PAD']
    corpus = []
    for row in matches:
        corpus += row[2].split(',')
        corpus += team_separator
        corpus += row[3].split(',')
        corpus += team_separator
    # create vocabulary using the corpus
    data, count, dictionary, reverse_dictionary = _build_vocabulary(corpus, vocabulary_size)
    logger.info('Most common heroes (+UNK): %s', count[:5])
    logger.info('Sample data: %s', data[:10])
    # free unused memory
    del corpus
    final_embeddings = _train_word2vec(data,
                                       batch_size,
                                       vocabulary_size,
                                       embedding_size,
                                       neg_samples,
                                       window_size,
                                       num_steps,
                                       reverse_dictionary,
                                       heroes_dict)
    _plot_similarities(final_embeddings, heroes_dict, reverse_dictionary)
|
mit
|
waqasbhatti/astrobase
|
astrobase/lcproc/varthreshold.py
|
2
|
31916
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# varthreshold.py - Waqas Bhatti ([email protected]) - Feb 2019
'''
This contains functions to investigate where to set a threshold for
several variability indices to distinguish between variable and non-variable
stars.
'''
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
# flip to True to get debug-level messages from this module
DEBUG = False
if DEBUG:
    level = logging.DEBUG
else:
    level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
    level=level,
    style=log_sub,
    format=log_fmt,
    datefmt=log_date_fmt,
)
# module-level aliases used throughout this file instead of LOGGER.<method>
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import pickle
import os
import os.path
import glob
import multiprocessing as mp
# to turn a list of keys into a dict address
# from https://stackoverflow.com/a/14692747
from functools import reduce
from operator import getitem
def _dict_get(datadict, keylist):
return reduce(getitem, keylist, datadict)
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
try:
from tqdm import tqdm
TQDM = True
except Exception:
TQDM = False
pass
############
## CONFIG ##
############
NCPUS = mp.cpu_count()
###################
## LOCAL IMPORTS ##
###################
from astrobase.magnitudes import jhk_to_sdssr
from astrobase.lcproc import get_lcformat
###########################
## VARIABILITY THRESHOLD ##
###########################
DEFAULT_MAGBINS = np.arange(8.0,16.25,0.25)
def variability_threshold(featuresdir,
                          outfile,
                          magbins=DEFAULT_MAGBINS,
                          maxobjects=None,
                          timecols=None,
                          magcols=None,
                          errcols=None,
                          lcformat='hat-sql',
                          lcformatdir=None,
                          min_lcmad_stdev=5.0,
                          min_stetj_stdev=2.0,
                          min_iqr_stdev=2.0,
                          min_inveta_stdev=2.0,
                          verbose=True):
    '''This generates a list of objects with stetson J, IQR, and 1.0/eta
    above some threshold value to select them as potential variable stars.
    Use this to pare down the objects to review and put through
    period-finding. This does the thresholding per magnitude bin; this should be
    better than one single cut through the entire magnitude range. Set the
    magnitude bins using the magbins kwarg.
    FIXME: implement a voting classifier here. this will choose variables based
    on the thresholds in IQR, stetson, and inveta based on weighting carried
    over from the variability recovery sims.
    Parameters
    ----------
    featuresdir : str
        This is the directory containing variability feature pickles created by
        :py:func:`astrobase.lcproc.lcpfeatures.parallel_varfeatures` or similar.
    outfile : str
        This is the output pickle file that will contain all the threshold
        information.
    magbins : np.array of floats
        This sets the magnitude bins to use for calculating thresholds.
    maxobjects : int or None
        This is the number of objects to process. If None, all objects with
        feature pickles in `featuresdir` will be processed.
    timecols : list of str or None
        The timecol keys to use from the lcdict in calculating the thresholds.
    magcols : list of str or None
        The magcol keys to use from the lcdict in calculating the thresholds.
    errcols : list of str or None
        The errcol keys to use from the lcdict in calculating the thresholds.
    lcformat : str
        This is the `formatkey` associated with your light curve format, which
        you previously passed in to the `lcproc.register_lcformat`
        function. This will be used to look up how to find and read the light
        curves specified in `basedir` or `use_list_of_filenames`.
    lcformatdir : str or None
        If this is provided, gives the path to a directory when you've stored
        your lcformat description JSONs, other than the usual directories lcproc
        knows to search for them in. Use this along with `lcformat` to specify
        an LC format JSON file that's not currently registered with lcproc.
    min_lcmad_stdev,min_stetj_stdev,min_iqr_stdev,min_inveta_stdev : float or np.array
        These are all the standard deviation multiplier for the distributions of
        light curve standard deviation, Stetson J variability index, the light
        curve interquartile range, and 1/eta variability index
        respectively. These multipliers set the minimum values of these measures
        to use for selecting variable stars. If provided as floats, the same
        value will be used for all magbins. If provided as np.arrays of `size =
        magbins.size - 1`, will be used to apply possibly different sigma cuts
        for each magbin.
    verbose : bool
        If True, will report progress and warn about any problems.
    Returns
    -------
    dict
        Contains all of the variability threshold information along with indices
        into the array of the object IDs chosen as variables.
    '''
    # resolve the light curve format; bail out early if it can't be looked up
    try:
        formatinfo = get_lcformat(lcformat,
                                  use_lcformat_dir=lcformatdir)
        if formatinfo:
            (dfileglob, readerfunc,
             dtimecols, dmagcols, derrcols,
             magsarefluxes, normfunc) = formatinfo
        else:
            LOGERROR("can't figure out the light curve format")
            return None
    except Exception:
        LOGEXCEPTION("can't figure out the light curve format")
        return None
    # override the default timecols, magcols, and errcols
    # using the ones provided to the function
    if timecols is None:
        timecols = dtimecols
    if magcols is None:
        magcols = dmagcols
    if errcols is None:
        errcols = derrcols
    # list of input pickles generated by varfeatures functions above
    pklist = glob.glob(os.path.join(featuresdir, 'varfeatures-*.pkl'))
    if maxobjects:
        pklist = pklist[:maxobjects]
    allobjects = {}
    for magcol in magcols:
        # keep local copies of these so we can fix them independently in case of
        # nans
        if (isinstance(min_stetj_stdev, list) or
            isinstance(min_stetj_stdev, np.ndarray)):
            magcol_min_stetj_stdev = min_stetj_stdev[::]
        else:
            magcol_min_stetj_stdev = min_stetj_stdev
        if (isinstance(min_iqr_stdev, list) or
            isinstance(min_iqr_stdev, np.ndarray)):
            magcol_min_iqr_stdev = min_iqr_stdev[::]
        else:
            magcol_min_iqr_stdev = min_iqr_stdev
        if (isinstance(min_inveta_stdev, list) or
            isinstance(min_inveta_stdev, np.ndarray)):
            magcol_min_inveta_stdev = min_inveta_stdev[::]
        else:
            magcol_min_inveta_stdev = min_inveta_stdev
        LOGINFO('getting all object sdssr, LC MAD, stet J, IQR, eta...')
        # we'll calculate the sigma per magnitude bin, so get the mags as well
        allobjects[magcol] = {
            'objectid':[],
            'sdssr':[],
            'lcmad':[],
            'stetsonj':[],
            'iqr':[],
            'eta':[]
        }
        # fancy progress bar with tqdm if present
        if TQDM and verbose:
            listiterator = tqdm(pklist)
        else:
            listiterator = pklist
        for pkl in listiterator:
            with open(pkl,'rb') as infd:
                thisfeatures = pickle.load(infd)
            objectid = thisfeatures['objectid']
            # the object magnitude
            if ('info' in thisfeatures and
                thisfeatures['info'] and
                'sdssr' in thisfeatures['info']):
                if (thisfeatures['info']['sdssr'] and
                    thisfeatures['info']['sdssr'] > 3.0):
                    sdssr = thisfeatures['info']['sdssr']
                elif (magcol in thisfeatures and
                      thisfeatures[magcol] and
                      'median' in thisfeatures[magcol] and
                      thisfeatures[magcol]['median'] > 3.0):
                    sdssr = thisfeatures[magcol]['median']
                elif (thisfeatures['info']['jmag'] and
                      thisfeatures['info']['hmag'] and
                      thisfeatures['info']['kmag']):
                    sdssr = jhk_to_sdssr(thisfeatures['info']['jmag'],
                                         thisfeatures['info']['hmag'],
                                         thisfeatures['info']['kmag'])
                else:
                    sdssr = np.nan
            else:
                sdssr = np.nan
            # the MAD of the light curve
            # NOTE(review): these truthiness checks treat a legitimate value of
            # exactly 0.0 the same as a missing value — confirm intended
            if (magcol in thisfeatures and
                thisfeatures[magcol] and
                thisfeatures[magcol]['mad']):
                lcmad = thisfeatures[magcol]['mad']
            else:
                lcmad = np.nan
            # stetson index
            if (magcol in thisfeatures and
                thisfeatures[magcol] and
                thisfeatures[magcol]['stetsonj']):
                stetsonj = thisfeatures[magcol]['stetsonj']
            else:
                stetsonj = np.nan
            # IQR
            if (magcol in thisfeatures and
                thisfeatures[magcol] and
                thisfeatures[magcol]['mag_iqr']):
                iqr = thisfeatures[magcol]['mag_iqr']
            else:
                iqr = np.nan
            # eta
            if (magcol in thisfeatures and
                thisfeatures[magcol] and
                thisfeatures[magcol]['eta_normal']):
                eta = thisfeatures[magcol]['eta_normal']
            else:
                eta = np.nan
            allobjects[magcol]['objectid'].append(objectid)
            allobjects[magcol]['sdssr'].append(sdssr)
            allobjects[magcol]['lcmad'].append(lcmad)
            allobjects[magcol]['stetsonj'].append(stetsonj)
            allobjects[magcol]['iqr'].append(iqr)
            allobjects[magcol]['eta'].append(eta)
        #
        # done with collection of info
        #
        LOGINFO('finding objects above thresholds per magbin...')
        # turn the info into arrays
        allobjects[magcol]['objectid'] = np.ravel(np.array(
            allobjects[magcol]['objectid']
        ))
        allobjects[magcol]['sdssr'] = np.ravel(np.array(
            allobjects[magcol]['sdssr']
        ))
        allobjects[magcol]['lcmad'] = np.ravel(np.array(
            allobjects[magcol]['lcmad']
        ))
        allobjects[magcol]['stetsonj'] = np.ravel(np.array(
            allobjects[magcol]['stetsonj']
        ))
        allobjects[magcol]['iqr'] = np.ravel(np.array(
            allobjects[magcol]['iqr']
        ))
        allobjects[magcol]['eta'] = np.ravel(np.array(
            allobjects[magcol]['eta']
        ))
        # only get finite elements everywhere
        thisfinind = (
            np.isfinite(allobjects[magcol]['sdssr']) &
            np.isfinite(allobjects[magcol]['lcmad']) &
            np.isfinite(allobjects[magcol]['stetsonj']) &
            np.isfinite(allobjects[magcol]['iqr']) &
            np.isfinite(allobjects[magcol]['eta'])
        )
        allobjects[magcol]['objectid'] = allobjects[magcol]['objectid'][
            thisfinind
        ]
        allobjects[magcol]['sdssr'] = allobjects[magcol]['sdssr'][thisfinind]
        allobjects[magcol]['lcmad'] = allobjects[magcol]['lcmad'][thisfinind]
        allobjects[magcol]['stetsonj'] = allobjects[magcol]['stetsonj'][
            thisfinind
        ]
        allobjects[magcol]['iqr'] = allobjects[magcol]['iqr'][thisfinind]
        allobjects[magcol]['eta'] = allobjects[magcol]['eta'][thisfinind]
        # invert eta so we can threshold the same way as the others
        allobjects[magcol]['inveta'] = 1.0/allobjects[magcol]['eta']
        # do the thresholding by magnitude bin
        magbininds = np.digitize(allobjects[magcol]['sdssr'],
                                 magbins)
        binned_objectids = []
        binned_sdssr = []
        binned_sdssr_median = []
        binned_lcmad = []
        binned_stetsonj = []
        binned_iqr = []
        binned_inveta = []
        binned_count = []
        binned_objectids_thresh_stetsonj = []
        binned_objectids_thresh_iqr = []
        binned_objectids_thresh_inveta = []
        binned_objectids_thresh_all = []
        binned_lcmad_median = []
        binned_lcmad_stdev = []
        binned_stetsonj_median = []
        binned_stetsonj_stdev = []
        binned_inveta_median = []
        binned_inveta_stdev = []
        binned_iqr_median = []
        binned_iqr_stdev = []
        # go through all the mag bins and get the thresholds for J, inveta, IQR
        for mbinind, magi in zip(np.unique(magbininds),
                                 range(len(magbins)-1)):
            thisbinind = np.where(magbininds == mbinind)
            thisbin_sdssr_median = (magbins[magi] + magbins[magi+1])/2.0
            binned_sdssr_median.append(thisbin_sdssr_median)
            thisbin_objectids = allobjects[magcol]['objectid'][thisbinind]
            thisbin_sdssr = allobjects[magcol]['sdssr'][thisbinind]
            thisbin_lcmad = allobjects[magcol]['lcmad'][thisbinind]
            thisbin_stetsonj = allobjects[magcol]['stetsonj'][thisbinind]
            thisbin_iqr = allobjects[magcol]['iqr'][thisbinind]
            thisbin_inveta = allobjects[magcol]['inveta'][thisbinind]
            thisbin_count = thisbin_objectids.size
            # need more than a handful of objects in the bin for usable medians
            if thisbin_count > 4:
                thisbin_lcmad_median = np.median(thisbin_lcmad)
                # robust stdev estimate: 1.483 x MAD approximates the standard
                # deviation for a Gaussian distribution
                thisbin_lcmad_stdev = np.median(
                    np.abs(thisbin_lcmad - thisbin_lcmad_median)
                ) * 1.483
                binned_lcmad_median.append(thisbin_lcmad_median)
                binned_lcmad_stdev.append(thisbin_lcmad_stdev)
                thisbin_stetsonj_median = np.median(thisbin_stetsonj)
                thisbin_stetsonj_stdev = np.median(
                    np.abs(thisbin_stetsonj - thisbin_stetsonj_median)
                ) * 1.483
                binned_stetsonj_median.append(thisbin_stetsonj_median)
                binned_stetsonj_stdev.append(thisbin_stetsonj_stdev)
                # now get the objects above the required stdev threshold
                # NOTE(review): an int threshold matches neither branch below —
                # only float, list, or ndarray thresholds are handled
                if isinstance(magcol_min_stetj_stdev, float):
                    thisbin_objectids_thresh_stetsonj = thisbin_objectids[
                        thisbin_stetsonj > (
                            thisbin_stetsonj_median +
                            magcol_min_stetj_stdev*thisbin_stetsonj_stdev
                        )
                    ]
                elif (isinstance(magcol_min_stetj_stdev, np.ndarray) or
                      isinstance(magcol_min_stetj_stdev, list)):
                    thisbin_min_stetj_stdev = magcol_min_stetj_stdev[magi]
                    if not np.isfinite(thisbin_min_stetj_stdev):
                        LOGWARNING('provided threshold stetson J stdev '
                                   'for magbin: %.3f is nan, using 2.0' %
                                   thisbin_sdssr_median)
                        thisbin_min_stetj_stdev = 2.0
                        # update the input list/array as well, since we'll be
                        # saving it to the output dict and using it to plot the
                        # variability thresholds
                        magcol_min_stetj_stdev[magi] = 2.0
                    thisbin_objectids_thresh_stetsonj = thisbin_objectids[
                        thisbin_stetsonj > (
                            thisbin_stetsonj_median +
                            thisbin_min_stetj_stdev*thisbin_stetsonj_stdev
                        )
                    ]
                thisbin_iqr_median = np.median(thisbin_iqr)
                thisbin_iqr_stdev = np.median(
                    np.abs(thisbin_iqr - thisbin_iqr_median)
                ) * 1.483
                binned_iqr_median.append(thisbin_iqr_median)
                binned_iqr_stdev.append(thisbin_iqr_stdev)
                # get the objects above the required stdev threshold
                if isinstance(magcol_min_iqr_stdev, float):
                    thisbin_objectids_thresh_iqr = thisbin_objectids[
                        thisbin_iqr > (thisbin_iqr_median +
                                       magcol_min_iqr_stdev*thisbin_iqr_stdev)
                    ]
                elif (isinstance(magcol_min_iqr_stdev, np.ndarray) or
                      isinstance(magcol_min_iqr_stdev, list)):
                    thisbin_min_iqr_stdev = magcol_min_iqr_stdev[magi]
                    if not np.isfinite(thisbin_min_iqr_stdev):
                        LOGWARNING('provided threshold IQR stdev '
                                   'for magbin: %.3f is nan, using 2.0' %
                                   thisbin_sdssr_median)
                        thisbin_min_iqr_stdev = 2.0
                        # update the input list/array as well, since we'll be
                        # saving it to the output dict and using it to plot the
                        # variability thresholds
                        magcol_min_iqr_stdev[magi] = 2.0
                    thisbin_objectids_thresh_iqr = thisbin_objectids[
                        thisbin_iqr > (thisbin_iqr_median +
                                       thisbin_min_iqr_stdev*thisbin_iqr_stdev)
                    ]
                thisbin_inveta_median = np.median(thisbin_inveta)
                thisbin_inveta_stdev = np.median(
                    np.abs(thisbin_inveta - thisbin_inveta_median)
                ) * 1.483
                binned_inveta_median.append(thisbin_inveta_median)
                binned_inveta_stdev.append(thisbin_inveta_stdev)
                if isinstance(magcol_min_inveta_stdev, float):
                    thisbin_objectids_thresh_inveta = thisbin_objectids[
                        thisbin_inveta > (
                            thisbin_inveta_median +
                            magcol_min_inveta_stdev*thisbin_inveta_stdev
                        )
                    ]
                elif (isinstance(magcol_min_inveta_stdev, np.ndarray) or
                      isinstance(magcol_min_inveta_stdev, list)):
                    thisbin_min_inveta_stdev = magcol_min_inveta_stdev[magi]
                    if not np.isfinite(thisbin_min_inveta_stdev):
                        LOGWARNING('provided threshold inveta stdev '
                                   'for magbin: %.3f is nan, using 2.0' %
                                   thisbin_sdssr_median)
                        thisbin_min_inveta_stdev = 2.0
                        # update the input list/array as well, since we'll be
                        # saving it to the output dict and using it to plot the
                        # variability thresholds
                        magcol_min_inveta_stdev[magi] = 2.0
                    thisbin_objectids_thresh_inveta = thisbin_objectids[
                        thisbin_inveta > (
                            thisbin_inveta_median +
                            thisbin_min_inveta_stdev*thisbin_inveta_stdev
                        )
                    ]
            else:
                # not enough objects in this bin: no thresholded objects
                thisbin_objectids_thresh_stetsonj = (
                    np.array([],dtype=np.unicode_)
                )
                thisbin_objectids_thresh_iqr = (
                    np.array([],dtype=np.unicode_)
                )
                thisbin_objectids_thresh_inveta = (
                    np.array([],dtype=np.unicode_)
                )
            #
            # done with check for enough objects in the bin
            #
            # get the intersection of all threshold objects to get objects that
            # lie above the threshold for all variable indices
            thisbin_objectids_thresh_all = reduce(
                np.intersect1d,
                (thisbin_objectids_thresh_stetsonj,
                 thisbin_objectids_thresh_iqr,
                 thisbin_objectids_thresh_inveta)
            )
            binned_objectids.append(thisbin_objectids)
            binned_sdssr.append(thisbin_sdssr)
            binned_lcmad.append(thisbin_lcmad)
            binned_stetsonj.append(thisbin_stetsonj)
            binned_iqr.append(thisbin_iqr)
            binned_inveta.append(thisbin_inveta)
            binned_count.append(thisbin_objectids.size)
            binned_objectids_thresh_stetsonj.append(
                thisbin_objectids_thresh_stetsonj
            )
            binned_objectids_thresh_iqr.append(
                thisbin_objectids_thresh_iqr
            )
            binned_objectids_thresh_inveta.append(
                thisbin_objectids_thresh_inveta
            )
            binned_objectids_thresh_all.append(
                thisbin_objectids_thresh_all
            )
        #
        # done with magbins
        #
        # update the output dict for this magcol
        allobjects[magcol]['magbins'] = magbins
        allobjects[magcol]['binned_objectids'] = binned_objectids
        allobjects[magcol]['binned_sdssr_median'] = binned_sdssr_median
        allobjects[magcol]['binned_sdssr'] = binned_sdssr
        allobjects[magcol]['binned_count'] = binned_count
        allobjects[magcol]['binned_lcmad'] = binned_lcmad
        allobjects[magcol]['binned_lcmad_median'] = binned_lcmad_median
        allobjects[magcol]['binned_lcmad_stdev'] = binned_lcmad_stdev
        allobjects[magcol]['binned_stetsonj'] = binned_stetsonj
        allobjects[magcol]['binned_stetsonj_median'] = binned_stetsonj_median
        allobjects[magcol]['binned_stetsonj_stdev'] = binned_stetsonj_stdev
        allobjects[magcol]['binned_iqr'] = binned_iqr
        allobjects[magcol]['binned_iqr_median'] = binned_iqr_median
        allobjects[magcol]['binned_iqr_stdev'] = binned_iqr_stdev
        allobjects[magcol]['binned_inveta'] = binned_inveta
        allobjects[magcol]['binned_inveta_median'] = binned_inveta_median
        allobjects[magcol]['binned_inveta_stdev'] = binned_inveta_stdev
        allobjects[magcol]['binned_objectids_thresh_stetsonj'] = (
            binned_objectids_thresh_stetsonj
        )
        allobjects[magcol]['binned_objectids_thresh_iqr'] = (
            binned_objectids_thresh_iqr
        )
        allobjects[magcol]['binned_objectids_thresh_inveta'] = (
            binned_objectids_thresh_inveta
        )
        allobjects[magcol]['binned_objectids_thresh_all'] = (
            binned_objectids_thresh_all
        )
        # get the common selected objects thru all measures
        # np.concatenate raises ValueError on an empty sequence of arrays
        try:
            allobjects[magcol]['objectids_all_thresh_all_magbins'] = np.unique(
                np.concatenate(
                    allobjects[magcol]['binned_objectids_thresh_all']
                )
            )
        except ValueError:
            LOGWARNING('not enough variable objects matching all thresholds')
            allobjects[magcol]['objectids_all_thresh_all_magbins'] = (
                np.array([])
            )
        allobjects[magcol]['objectids_stetsonj_thresh_all_magbins'] = np.unique(
            np.concatenate(
                allobjects[magcol]['binned_objectids_thresh_stetsonj']
            )
        )
        allobjects[magcol]['objectids_inveta_thresh_all_magbins'] = np.unique(
            np.concatenate(allobjects[magcol]['binned_objectids_thresh_inveta'])
        )
        allobjects[magcol]['objectids_iqr_thresh_all_magbins'] = np.unique(
            np.concatenate(allobjects[magcol]['binned_objectids_thresh_iqr'])
        )
        # turn these into np.arrays for easier plotting if they're lists
        if isinstance(min_stetj_stdev, list):
            allobjects[magcol]['min_stetj_stdev'] = np.array(
                magcol_min_stetj_stdev
            )
        else:
            allobjects[magcol]['min_stetj_stdev'] = magcol_min_stetj_stdev
        if isinstance(min_iqr_stdev, list):
            allobjects[magcol]['min_iqr_stdev'] = np.array(
                magcol_min_iqr_stdev
            )
        else:
            allobjects[magcol]['min_iqr_stdev'] = magcol_min_iqr_stdev
        if isinstance(min_inveta_stdev, list):
            allobjects[magcol]['min_inveta_stdev'] = np.array(
                magcol_min_inveta_stdev
            )
        else:
            allobjects[magcol]['min_inveta_stdev'] = magcol_min_inveta_stdev
        # this one doesn't get touched (for now)
        allobjects[magcol]['min_lcmad_stdev'] = min_lcmad_stdev
    #
    # done with all magcols
    #
    allobjects['magbins'] = magbins
    with open(outfile,'wb') as outfd:
        pickle.dump(allobjects, outfd, protocol=pickle.HIGHEST_PROTOCOL)
    return allobjects
def plot_variability_thresholds(varthreshpkl,
                                xmin_lcmad_stdev=5.0,
                                xmin_stetj_stdev=2.0,
                                xmin_iqr_stdev=2.0,
                                xmin_inveta_stdev=2.0,
                                lcformat='hat-sql',
                                lcformatdir=None,
                                magcols=None):
    '''This makes plots for the variability threshold distributions.
    Parameters
    ----------
    varthreshpkl : str
        The pickle produced by the function above.
    xmin_lcmad_stdev,xmin_stetj_stdev,xmin_iqr_stdev,xmin_inveta_stdev : float or np.array
        Values of the threshold values to override the ones in the
        `vartresholdpkl`. If provided, will plot the thresholds accordingly
        instead of using the ones in the input pickle directly.
    lcformat : str
        This is the `formatkey` associated with your light curve format, which
        you previously passed in to the `lcproc.register_lcformat`
        function. This will be used to look up how to find and read the light
        curves specified in `basedir` or `use_list_of_filenames`.
    lcformatdir : str or None
        If this is provided, gives the path to a directory when you've stored
        your lcformat description JSONs, other than the usual directories lcproc
        knows to search for them in. Use this along with `lcformat` to specify
        an LC format JSON file that's not currently registered with lcproc.
    magcols : list of str or None
        The magcol keys to use from the lcdict.
    Returns
    -------
    str
        The file name of the threshold plot generated.
    '''
    # resolve the light curve format; bail out early if it can't be looked up
    try:
        formatinfo = get_lcformat(lcformat,
                                  use_lcformat_dir=lcformatdir)
        if formatinfo:
            (dfileglob, readerfunc,
             dtimecols, dmagcols, derrcols,
             magsarefluxes, normfunc) = formatinfo
        else:
            LOGERROR("can't figure out the light curve format")
            return None
    except Exception:
        LOGEXCEPTION("can't figure out the light curve format")
        return None
    if magcols is None:
        magcols = dmagcols
    with open(varthreshpkl,'rb') as infd:
        allobjects = pickle.load(infd)
    magbins = allobjects['magbins']
    for magcol in magcols:
        # NOTE(review): `or` falls back to the pickle's stored threshold when
        # the kwarg is falsy, so passing 0 (or 0.0) cannot override — confirm
        # this is intended
        min_lcmad_stdev = (
            xmin_lcmad_stdev or allobjects[magcol]['min_lcmad_stdev']
        )
        min_stetj_stdev = (
            xmin_stetj_stdev or allobjects[magcol]['min_stetj_stdev']
        )
        min_iqr_stdev = (
            xmin_iqr_stdev or allobjects[magcol]['min_iqr_stdev']
        )
        min_inveta_stdev = (
            xmin_inveta_stdev or allobjects[magcol]['min_inveta_stdev']
        )
        # 2x2 grid: lcmad, stetsonj, iqr, inveta; each panel shows the per-object
        # scatter, the binned median, and the median + threshold*stdev cut line
        fig = plt.figure(figsize=(20,16))
        # the mag vs lcmad
        plt.subplot(221)
        plt.plot(allobjects[magcol]['sdssr'],
                 allobjects[magcol]['lcmad']*1.483,
                 marker='.',ms=1.0, linestyle='none',
                 rasterized=True)
        plt.plot(allobjects[magcol]['binned_sdssr_median'],
                 np.array(allobjects[magcol]['binned_lcmad_median'])*1.483,
                 linewidth=3.0)
        plt.plot(
            allobjects[magcol]['binned_sdssr_median'],
            np.array(allobjects[magcol]['binned_lcmad_median'])*1.483 +
            min_lcmad_stdev*np.array(
                allobjects[magcol]['binned_lcmad_stdev']
            ),
            linewidth=3.0, linestyle='dashed'
        )
        plt.xlim((magbins.min()-0.25, magbins.max()))
        plt.xlabel('SDSS r')
        plt.ylabel(r'lightcurve RMS (MAD $\times$ 1.483)')
        plt.title('%s - SDSS r vs. light curve RMS' % magcol)
        plt.yscale('log')
        plt.tight_layout()
        # the mag vs stetsonj
        plt.subplot(222)
        plt.plot(allobjects[magcol]['sdssr'],
                 allobjects[magcol]['stetsonj'],
                 marker='.',ms=1.0, linestyle='none',
                 rasterized=True)
        plt.plot(allobjects[magcol]['binned_sdssr_median'],
                 allobjects[magcol]['binned_stetsonj_median'],
                 linewidth=3.0)
        plt.plot(
            allobjects[magcol]['binned_sdssr_median'],
            np.array(allobjects[magcol]['binned_stetsonj_median']) +
            min_stetj_stdev*np.array(
                allobjects[magcol]['binned_stetsonj_stdev']
            ),
            linewidth=3.0, linestyle='dashed'
        )
        plt.xlim((magbins.min()-0.25, magbins.max()))
        plt.xlabel('SDSS r')
        plt.ylabel('Stetson J index')
        plt.title('%s - SDSS r vs. Stetson J index' % magcol)
        plt.yscale('log')
        plt.tight_layout()
        # the mag vs IQR
        plt.subplot(223)
        plt.plot(allobjects[magcol]['sdssr'],
                 allobjects[magcol]['iqr'],
                 marker='.',ms=1.0, linestyle='none',
                 rasterized=True)
        plt.plot(allobjects[magcol]['binned_sdssr_median'],
                 allobjects[magcol]['binned_iqr_median'],
                 linewidth=3.0)
        plt.plot(
            allobjects[magcol]['binned_sdssr_median'],
            np.array(allobjects[magcol]['binned_iqr_median']) +
            min_iqr_stdev*np.array(
                allobjects[magcol]['binned_iqr_stdev']
            ),
            linewidth=3.0, linestyle='dashed'
        )
        plt.xlabel('SDSS r')
        plt.ylabel('IQR')
        plt.title('%s - SDSS r vs. IQR' % magcol)
        plt.xlim((magbins.min()-0.25, magbins.max()))
        plt.yscale('log')
        plt.tight_layout()
        # the mag vs inveta
        plt.subplot(224)
        plt.plot(allobjects[magcol]['sdssr'],
                 allobjects[magcol]['inveta'],
                 marker='.',ms=1.0, linestyle='none',
                 rasterized=True)
        plt.plot(allobjects[magcol]['binned_sdssr_median'],
                 allobjects[magcol]['binned_inveta_median'],
                 linewidth=3.0)
        plt.plot(
            allobjects[magcol]['binned_sdssr_median'],
            np.array(allobjects[magcol]['binned_inveta_median']) +
            min_inveta_stdev*np.array(
                allobjects[magcol]['binned_inveta_stdev']
            ),
            linewidth=3.0, linestyle='dashed'
        )
        plt.xlabel('SDSS r')
        plt.ylabel(r'$1/\eta$')
        plt.title(r'%s - SDSS r vs. $1/\eta$' % magcol)
        plt.xlim((magbins.min()-0.25, magbins.max()))
        plt.yscale('log')
        plt.tight_layout()
        plt.savefig('varfeatures-%s-%s-distributions.png' % (varthreshpkl,
                                                             magcol),
                    bbox_inches='tight')
        plt.close('all')
    # NOTE(review): the docstring says the plot file name is returned, but
    # nothing is returned here — confirm which is intended
|
mit
|
saguziel/incubator-airflow
|
airflow/contrib/hooks/salesforce_hook.py
|
30
|
12110
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a Salesforce Hook
which allows you to connect to your Salesforce instance,
retrieve data from it, and write that data to a file
for other uses.
NOTE: this hook also relies on the simple_salesforce package:
https://github.com/simple-salesforce/simple-salesforce
"""
from simple_salesforce import Salesforce
from airflow.hooks.base_hook import BaseHook
import logging
import json
import pandas as pd
import time
class SalesforceHook(BaseHook):
def __init__(
self,
conn_id,
*args,
**kwargs
):
"""
Create new connection to Salesforce
and allows you to pull data out of SFDC and save it to a file.
You can then use that file with other
Airflow operators to move the data into another data source
:param conn_id: the name of the connection that has the parameters
we need to connect to Salesforce.
The conenction shoud be type `http` and include a
user's security token in the `Extras` field.
.. note::
For the HTTP connection type, you can include a
JSON structure in the `Extras` field.
We need a user's security token to connect to Salesforce.
So we define it in the `Extras` field as:
`{"security_token":"YOUR_SECRUITY_TOKEN"}`
"""
self.conn_id = conn_id
self._args = args
self._kwargs = kwargs
# get the connection parameters
self.connection = self.get_connection(conn_id)
self.extras = self.connection.extra_dejson
def sign_in(self):
"""
Sign into Salesforce.
If we have already signed it, this will just return the original object
"""
if hasattr(self, 'sf'):
return self.sf
# connect to Salesforce
sf = Salesforce(
username=self.connection.login,
password=self.connection.password,
security_token=self.extras['security_token'],
instance_url=self.connection.host
)
self.sf = sf
return sf
def make_query(self, query):
"""
Make a query to Salesforce. Returns result in dictionary
:param query: The query to make to Salesforce
"""
self.sign_in()
logging.info("Querying for all objects")
query = self.sf.query_all(query)
logging.info(
"Received results: Total size: {0}; Done: {1}".format(
query['totalSize'], query['done']
)
)
query = json.loads(json.dumps(query))
return query
def describe_object(self, obj):
"""
Get the description of an object from Salesforce.
This description is the object's schema
and some extra metadata that Salesforce stores for each object
:param obj: Name of the Salesforce object
that we are getting a description of.
"""
self.sign_in()
return json.loads(json.dumps(self.sf.__getattr__(obj).describe()))
def get_available_fields(self, obj):
"""
Get a list of all available fields for an object.
This only returns the names of the fields.
"""
self.sign_in()
desc = self.describe_object(obj)
return [f['name'] for f in desc['fields']]
def _build_field_list(self, fields):
# join all of the fields in a comma seperated list
return ",".join(fields)
def get_object_from_salesforce(self, obj, fields):
"""
Get all instances of the `object` from Salesforce.
For each model, only get the fields specified in fields.
All we really do underneath the hood is run:
SELECT <fields> FROM <obj>;
"""
field_string = self._build_field_list(fields)
query = "SELECT {0} FROM {1}".format(field_string, obj)
logging.info(
"Making query to salesforce: {0}".format(
query if len(query) < 30
else " ... ".join([query[:15], query[-15:]])
)
)
return self.make_query(query)
@classmethod
def _to_timestamp(cls, col):
"""
Convert a column of a dataframe to UNIX timestamps if applicable
:param col: A Series object representing a column of a dataframe.
"""
# try and convert the column to datetimes
# the column MUST have a four digit year somewhere in the string
# there should be a better way to do this,
# but just letting pandas try and convert every column without a format
# caused it to convert floats as well
# For example, a column of integers
# between 0 and 10 are turned into timestamps
# if the column cannot be converted,
# just return the original column untouched
try:
col = pd.to_datetime(col)
except ValueError:
logging.warning(
"Could not convert field to timestamps: {0}".format(col.name)
)
return col
# now convert the newly created datetimes into timestamps
# we have to be careful here
# because NaT cannot be converted to a timestamp
# so we have to return NaN
converted = []
for i in col:
try:
converted.append(i.timestamp())
except ValueError:
converted.append(pd.np.NaN)
except AttributeError:
converted.append(pd.np.NaN)
# return a new series that maintains the same index as the original
return pd.Series(converted, index=col.index)
    def write_object_to_file(
        self,
        query_results,
        filename,
        fmt="csv",
        coerce_to_timestamp=False,
        record_time_added=False
    ):
        """
        Write query results to file.
        Acceptable formats are:
            - csv:
                comma-seperated-values file. This is the default format.
            - json:
                JSON array. Each element in the array is a different row.
            - ndjson:
                JSON array but each element is new-line deliminated
                instead of comman deliminated like in `json`
        This requires a significant amount of cleanup.
        Pandas doesn't handle output to CSV and json in a uniform way.
        This is especially painful for datetime types.
        Pandas wants to write them as strings in CSV,
        but as milisecond Unix timestamps.
        By default, this function will try and leave all values as
        they are represented in Salesforce.
        You use the `coerce_to_timestamp` flag to force all datetimes
        to become Unix timestamps (UTC).
        This is can be greatly beneficial as it will make all of your
        datetime fields look the same,
        and makes it easier to work with in other database environments
        :param query_results: the results from a SQL query
        :param filename: the name of the file where the data
                         should be dumped to
        :param fmt: the format you want the output in.
            *Default:* csv.
        :param coerce_to_timestamp: True if you want all datetime fields to be
                                    converted into Unix timestamps.
                                    False if you want them to be left in the
                                    same format as they were in Salesforce.
                                    Leaving the value as False will result
                                    in datetimes being strings.
                                    *Defaults to False*
        :param record_time_added: *(optional)* True if you want to add a
                                  Unix timestamp field to the resulting data
                                  that marks when the data
                                  was fetched from Salesforce.
                                  *Default: False*.
        :return: the pandas DataFrame that was written to ``filename``
        """
        # normalize and validate the requested output format up front
        fmt = fmt.lower()
        if fmt not in ['csv', 'json', 'ndjson']:
            raise ValueError("Format value is not recognized: {0}".format(fmt))
        # this line right here will convert all integers to floats if there are
        # any None/np.nan values in the column
        # that's because None/np.nan cannot exist in an integer column
        # we should write all of our timestamps as FLOATS in our final schema
        df = pd.DataFrame.from_records(query_results, exclude=["attributes"])
        # lower-case the column names so they line up with the schema
        # field names fetched below
        df.columns = [c.lower() for c in df.columns]
        # convert columns with datetime strings to datetimes
        # not all strings will be datetimes, so we ignore any errors that occur
        # we get the object's definition at this point and only consider
        # features that are DATE or DATETIME
        if coerce_to_timestamp and df.shape[0] > 0:
            # get the object name out of the query results
            # it's stored in the "attributes" dictionary
            # for each returned record
            object_name = query_results[0]['attributes']['type']
            logging.info("Coercing timestamps for: {0}".format(object_name))
            schema = self.describe_object(object_name)
            # possible columns that can be convereted to timestamps
            # are the ones that are either date or datetime types
            # strings are too general and we risk unintentional conversion
            possible_timestamp_cols = [
                i['name'].lower()
                for i in schema['fields']
                if i['type'] in ["date", "datetime"] and
                i['name'].lower() in df.columns
            ]
            df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(
                lambda x: self._to_timestamp(x)
            )
        if record_time_added:
            # stamp every row with one shared fetch time
            # NOTE(review): relies on a module-level `time` import that is
            # outside this chunk — confirm it exists at the top of the file
            fetched_time = time.time()
            df["time_fetched_from_salesforce"] = fetched_time
        # write the CSV or JSON file depending on the option
        # NOTE:
        #   datetimes here are an issue.
        #   There is no good way to manage the difference
        #   for to_json, the options are an epoch or a ISO string
        #   but for to_csv, it will be a string output by datetime
        #   For JSON we decided to output the epoch timestamp in seconds
        #   (as is fairly standard for JavaScript)
        #   And for csv, we do a string
        if fmt == "csv":
            # there are also a ton of newline objects
            # that mess up our ability to write to csv
            # we remove these newlines so that the output is a valid CSV format
            logging.info("Cleaning data and writing to CSV")
            possible_strings = df.columns[df.dtypes == "object"]
            df[possible_strings] = df[possible_strings].apply(
                lambda x: x.str.replace("\r\n", "")
            )
            df[possible_strings] = df[possible_strings].apply(
                lambda x: x.str.replace("\n", "")
            )
            # write the dataframe
            df.to_csv(filename, index=False)
        elif fmt == "json":
            # second positional argument is `orient`
            df.to_json(filename, "records", date_unit="s")
        elif fmt == "ndjson":
            # newline-delimited JSON: one record per line
            df.to_json(filename, "records", lines=True, date_unit="s")
        return df
|
apache-2.0
|
ky822/scikit-learn
|
sklearn/ensemble/tests/test_forest.py
|
6
|
35370
|
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample: two linearly separable clusters in 2-D
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# registries of the estimators under test, keyed by class name;
# the check_* helpers look estimators up in these dicts
FOREST_CLASSIFIERS = {
    "ExtraTreesClassifier": ExtraTreesClassifier,
    "RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
    "ExtraTreesRegressor": ExtraTreesRegressor,
    "RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
    "RandomTreesEmbedding": RandomTreesEmbedding,
}
# union of all of the above, used by checks that apply to every estimator
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
    """Check classification on a toy dataset."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # default settings first, then a restricted max_features
    for extra_params in ({}, {"max_features": 1}):
        clf = ForestClassifier(n_estimators=10, random_state=1,
                               **extra_params)
        clf.fit(X, y)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(10, len(clf))
    # also test apply on the last fitted forest
    leaf_indices = clf.apply(X)
    assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
    """Yield the toy-data classification check for every forest classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_classification_toy, clf_name
def check_iris_criterion(name, criterion):
    """Check consistency of a forest classifier on the iris dataset."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # full feature set must score > 0.9; max_features=2 must score > 0.5
    for extra_params, threshold in (({}, 0.9), ({"max_features": 2}, 0.5)):
        clf = ForestClassifier(n_estimators=10, criterion=criterion,
                               random_state=1, **extra_params)
        clf.fit(iris.data, iris.target)
        score = clf.score(iris.data, iris.target)
        assert_greater(score, threshold,
                       "Failed with criterion %s and score = %f"
                       % (criterion, score))
def test_iris():
    """Yield iris checks for each classifier/criterion combination."""
    for clf_name, criterion in product(FOREST_CLASSIFIERS,
                                       ("gini", "entropy")):
        yield check_iris_criterion, clf_name, criterion
def check_boston_criterion(name, criterion):
    """Check consistency of a forest regressor on the boston dataset."""
    ForestRegressor = FOREST_REGRESSORS[name]
    cases = [
        ({}, "Failed with max_features=None, criterion %s "
             "and score = %f"),
        ({"max_features": 6}, "Failed with max_features=6, criterion %s "
                              "and score = %f"),
    ]
    for extra_params, message in cases:
        reg = ForestRegressor(n_estimators=5, criterion=criterion,
                              random_state=1, **extra_params)
        reg.fit(boston.data, boston.target)
        score = reg.score(boston.data, boston.target)
        assert_greater(score, 0.95, message % (criterion, score))
def test_boston():
    """Yield boston checks for every regressor with the mse criterion."""
    for reg_name, criterion in product(FOREST_REGRESSORS, ("mse", )):
        yield check_boston_criterion, reg_name, criterion
def check_regressor_attributes(name):
    """Regression models should not expose classes_ / n_classes_."""
    reg = FOREST_REGRESSORS[name](random_state=0)
    # neither attribute should exist before fitting...
    assert_false(hasattr(reg, "classes_"))
    assert_false(hasattr(reg, "n_classes_"))
    # ...nor after fitting
    reg.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
    assert_false(hasattr(reg, "classes_"))
    assert_false(hasattr(reg, "n_classes_"))
def test_regressor_attributes():
    """Yield the attribute check for every forest regressor."""
    for reg_name in FOREST_REGRESSORS:
        yield check_regressor_attributes, reg_name
def check_probability(name):
    """predict_proba rows sum to one and match exp(predict_log_proba)."""
    clf = FOREST_CLASSIFIERS[name](n_estimators=10, random_state=1,
                                   max_features=1, max_depth=1)
    with np.errstate(divide="ignore"):
        clf.fit(iris.data, iris.target)
        proba = clf.predict_proba(iris.data)
        assert_array_almost_equal(np.sum(proba, axis=1),
                                  np.ones(iris.data.shape[0]))
        assert_array_almost_equal(proba,
                                  np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
    """Yield the probability check for every forest classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_probability, clf_name
def check_importances(name, X, y):
    """Check variable importances and their interaction with sample weights.

    The data has 3 informative features out of 10: the forest must mark
    exactly those as important, and importances must be invariant to a
    uniform rescaling of the sample weights.
    """
    ForestClassifier = FOREST_CLASSIFIERS[name]
    for n_jobs in [1, 2]:
        clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
        clf.fit(X, y)
        importances = clf.feature_importances_
        n_important = np.sum(importances > 0.1)
        assert_equal(importances.shape[0], 10)
        assert_equal(n_important, 3)
        X_new = clf.transform(X, threshold="mean")
        # BUG FIX: the original `assert_less(0 < X_new.shape[1], X.shape[1])`
        # compared the *boolean* `0 < X_new.shape[1]` (i.e. 0 or 1) against
        # X.shape[1], which is vacuously true. Assert the intended bounds:
        # a nonempty, strict subset of the features was selected.
        assert_greater(X_new.shape[1], 0)
        assert_less(X_new.shape[1], X.shape[1])
        # Check with sample weights
        sample_weight = np.ones(y.shape)
        sample_weight[y == 1] *= 100
        clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
        clf.fit(X, y, sample_weight=sample_weight)
        importances = clf.feature_importances_
        assert_true(np.all(importances >= 0.0))
        # scaling every weight by the same factor must not change anything
        clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
        clf.fit(X, y, sample_weight=3 * sample_weight)
        importances_bis = clf.feature_importances_
        assert_almost_equal(importances, importances_bis)
def test_importances():
    """Build a 3-informative-feature problem and yield importance checks."""
    X, y = datasets.make_classification(n_samples=1000, n_features=10,
                                        n_informative=3, n_redundant=0,
                                        n_repeated=0, shuffle=False,
                                        random_state=0)
    for clf_name in FOREST_CLASSIFIERS:
        yield check_importances, clf_name, X, y
def check_unfitted_feature_importances(name):
    """Accessing feature_importances_ before fit must raise ValueError."""
    unfitted = FOREST_ESTIMATORS[name](random_state=0)
    assert_raises(ValueError, getattr, unfitted, "feature_importances_")
def test_unfitted_feature_importances():
    """Yield the unfitted-importances check for every forest estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_unfitted_feature_importances, est_name
def check_oob_score(name, X, y, n_estimators=20):
    """OOB score should estimate the held-out generalization score."""
    # Check that oob prediction is a good estimation of the generalization
    # error.
    # Proper behavior
    est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                  n_estimators=n_estimators, bootstrap=True)
    # train on the first half of the data, score on the held-out second half
    n_samples = X.shape[0]
    est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
    test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
    if name in FOREST_CLASSIFIERS:
        # classification: OOB accuracy should be close to held-out accuracy
        assert_less(abs(test_score - est.oob_score_), 0.1)
    else:
        # regression: OOB R^2 is a pessimistic but still-high estimate
        assert_greater(test_score, est.oob_score_)
        assert_greater(est.oob_score_, .8)
    # Check warning if not enough estimators
    with np.errstate(divide="ignore", invalid="ignore"):
        est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                      n_estimators=1, bootstrap=True)
        assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
    """Yield OOB checks for classifiers and regressors, dense and sparse."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_oob_score, clf_name, iris.data, iris.target
        # csc matrix
        yield check_oob_score, clf_name, csc_matrix(iris.data), iris.target
        # non-contiguous targets in classification
        yield check_oob_score, clf_name, iris.data, iris.target * 2 + 1
    for reg_name in FOREST_REGRESSORS:
        yield check_oob_score, reg_name, boston.data, boston.target, 50
        # csc matrix
        yield (check_oob_score, reg_name, csc_matrix(boston.data),
               boston.target, 50)
def check_oob_score_raise_error(name):
    """oob_score must be rejected wherever it cannot be computed."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    if name in FOREST_TRANSFORMERS:
        # transformers have no notion of an OOB score at all
        for oob_score in [True, False]:
            assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
        assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
                      X, y)
    else:
        # Unfitted / no bootstrap / no oob_score
        for oob_score, bootstrap in [(True, False), (False, True),
                                     (False, False)]:
            est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
                                  random_state=0)
            # the attribute only appears after a successful OOB fit
            assert_false(hasattr(est, "oob_score_"))
        # No bootstrap
        assert_raises(ValueError, ForestEstimator(oob_score=True,
                                                  bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
    """Yield the OOB error-handling check for every forest estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_oob_score_raise_error, est_name
def check_gridsearch(name):
    """Smoke test: forests must work inside GridSearchCV."""
    param_grid = {'n_estimators': (1, 2), 'max_depth': (1, 2)}
    search = GridSearchCV(FOREST_CLASSIFIERS[name](), param_grid)
    search.fit(iris.data, iris.target)
def test_gridsearch():
    # Check that base trees can be grid-searched.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_gridsearch, clf_name
def check_parallel(name, X, y):
    """Predictions must not depend on the n_jobs setting."""
    forest = FOREST_ESTIMATORS[name](n_estimators=10, n_jobs=3,
                                     random_state=0)
    forest.fit(X, y)
    assert_equal(len(forest), 10)
    # predict twice with different parallelism and compare
    predictions = []
    for n_jobs in (1, 2):
        forest.set_params(n_jobs=n_jobs)
        predictions.append(forest.predict(X))
    assert_array_almost_equal(predictions[0], predictions[1], 3)
def test_parallel():
    """Yield parallel-prediction checks for classifiers and regressors."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_parallel, clf_name, iris.data, iris.target
    for reg_name in FOREST_REGRESSORS:
        yield check_parallel, reg_name, boston.data, boston.target
def check_pickle(name, X, y):
    """A pickled-and-restored forest must keep its type and score."""
    est = FOREST_ESTIMATORS[name](random_state=0)
    est.fit(X, y)
    score_before = est.score(X, y)
    restored = pickle.loads(pickle.dumps(est))
    assert_equal(type(restored), est.__class__)
    score_after = restored.score(X, y)
    assert_equal(score_before, score_after)
def test_pickle():
    """Yield pickling checks on subsampled iris / boston data."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_pickle, clf_name, iris.data[::2], iris.target[::2]
    for reg_name in FOREST_REGRESSORS:
        yield check_pickle, reg_name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
    """Estimators must handle multi-output problems (two outputs here)."""
    # Check estimators on multi-output problems.
    X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
               [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
    y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
               [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_almost_equal(y_pred, y_test)
    if name in FOREST_CLASSIFIERS:
        with np.errstate(divide="ignore"):
            # one probability array per output; output 0 has 2 classes,
            # output 1 has 4 classes
            proba = est.predict_proba(X_test)
            assert_equal(len(proba), 2)
            assert_equal(proba[0].shape, (4, 2))
            assert_equal(proba[1].shape, (4, 4))
            log_proba = est.predict_log_proba(X_test)
            assert_equal(len(log_proba), 2)
            assert_equal(log_proba[0].shape, (4, 2))
            assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
    """Yield multi-output checks for classifiers and regressors."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_multioutput, clf_name
    for reg_name in FOREST_REGRESSORS:
        yield check_multioutput, reg_name
def check_classes_shape(name):
    """n_classes_ and classes_ must have the proper shape."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # single output: scalar n_classes_ and a flat classes_ array
    clf = ForestClassifier(random_state=0).fit(X, y)
    assert_equal(clf.n_classes_, 2)
    assert_array_equal(clf.classes_, [-1, 1])
    # multi-output: one entry per output
    y_multi = np.vstack((y, np.array(y) * 2)).T
    clf = ForestClassifier(random_state=0).fit(X, y_multi)
    assert_array_equal(clf.n_classes_, [2, 2])
    assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
    """Yield the classes-shape check for every forest classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_classes_shape, clf_name
def test_random_trees_dense_type():
    # The `sparse_output=False` flag of RandomTreesEmbedding must yield a
    # dense numpy ndarray rather than a scipy sparse matrix.
    hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
    X, y = datasets.make_circles(factor=0.5)
    transformed = hasher.fit_transform(X)
    assert_equal(type(transformed), np.ndarray)
def test_random_trees_dense_equal():
    # With a fixed random_state, the sparse and dense outputs of
    # RandomTreesEmbedding must contain exactly the same values.
    X, y = datasets.make_circles(factor=0.5)
    dense_out = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
                                     random_state=0).fit_transform(X)
    sparse_out = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
                                      random_state=0).fit_transform(X)
    assert_array_equal(sparse_out.toarray(), dense_out)
def test_random_hasher():
    """Embedding by random trees should make the circles separable."""
    # test random forest hashing on circles dataset
    # make sure that it is linearly separable.
    # even after projected to two SVD dimensions
    # Note: Not all random_states produce perfect results.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    X, y = datasets.make_circles(factor=0.5)
    X_transformed = hasher.fit_transform(X)
    # test fit and transform:
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    assert_array_equal(hasher.fit(X).transform(X).toarray(),
                       X_transformed.toarray())
    # one leaf active per data point per forest
    assert_equal(X_transformed.shape[0], X.shape[0])
    assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
    # a linear SVM on the 2-D SVD projection must reach perfect accuracy
    svd = TruncatedSVD(n_components=2)
    X_reduced = svd.fit_transform(X_transformed)
    linear_clf = LinearSVC()
    linear_clf.fit(X_reduced, y)
    assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
    """Hashing sparse input must match hashing the dense equivalent."""
    X, y = datasets.make_multilabel_classification(random_state=0)
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    dense_out = hasher.fit_transform(X)
    sparse_out = hasher.fit_transform(csc_matrix(X))
    assert_array_equal(sparse_out.toarray(), dense_out.toarray())
def test_parallel_train():
    """Training with different n_jobs must produce identical forests."""
    rng = check_random_state(12321)
    n_samples, n_features = 80, 30
    X_train = rng.randn(n_samples, n_features)
    y_train = rng.randint(0, 2, n_samples)
    # one forest per parallelism level, all with the same seed
    clfs = []
    for n_jobs in [1, 2, 3, 8, 16, 32]:
        clf = RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
                                     random_state=12345)
        clfs.append(clf.fit(X_train, y_train))
    X_test = rng.randn(n_samples, n_features)
    probas = [clf.predict_proba(X_test) for clf in clfs]
    # every consecutive pair must agree
    for proba1, proba2 in zip(probas, probas[1:]):
        assert_array_almost_equal(proba1, proba2)
def test_distribution():
    """Check the distribution of tree structures built by extra-trees."""
    rng = check_random_state(12321)
    # Single variable with 4 values
    X = rng.randint(0, 4, size=(1000, 1))
    y = rng.rand(1000)
    n_trees = 500
    clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
    uniques = defaultdict(int)
    for tree in clf.estimators_:
        # serialize each tree's structure as a "feature,threshold/" string
        # ("-" marks a leaf) so identical shapes can be counted
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))
        uniques[tree] += 1
    uniques = sorted([(1. * count / n_trees, tree)
                      for tree, count in uniques.items()])
    # On a single variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
    # them has probability 1/3 while the 4 others have probability 1/6.
    assert_equal(len(uniques), 5)
    assert_greater(0.20, uniques[0][0])  # Rough approximation of 1/6.
    assert_greater(0.20, uniques[1][0])
    assert_greater(0.20, uniques[2][0])
    assert_greater(0.20, uniques[3][0])
    assert_greater(uniques[4][0], 0.3)
    assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
    # Two variables, one with 2 values, one with 3 values
    # NOTE(review): these two draws use the *global* np.random stream rather
    # than the seeded `rng` above — likely an oversight; confirm before
    # relying on reproducibility of this half of the test.
    X = np.empty((1000, 2))
    X[:, 0] = np.random.randint(0, 2, 1000)
    X[:, 1] = np.random.randint(0, 3, 1000)
    y = rng.rand(1000)
    clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
                              random_state=1).fit(X, y)
    uniques = defaultdict(int)
    for tree in clf.estimators_:
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))
        uniques[tree] += 1
    uniques = [(count, tree) for tree, count in uniques.items()]
    # with 2 x 3 possible split values there are 8 distinct tree shapes
    assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
    """max_leaf_nodes must take precedence over max_depth."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    # with max_leaf_nodes set, the best-first builder ignores max_depth
    estimator = ForestEstimator(max_depth=1, max_leaf_nodes=4,
                                n_estimators=1).fit(X, y)
    assert_greater(estimator.estimators_[0].tree_.max_depth, 1)
    # without max_leaf_nodes, max_depth is honored
    estimator = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
    assert_equal(estimator.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
    """Yield the precedence check for every forest estimator."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for est_name in FOREST_ESTIMATORS:
        yield check_max_leaf_nodes_max_depth, est_name, X, y
def check_min_samples_leaf(name, X, y):
    """Every leaf must contain at least min_samples_leaf training samples."""
    # Test if leaves contain more than leaf_count training examples
    ForestEstimator = FOREST_ESTIMATORS[name]
    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes in (None, 1000):
        est = ForestEstimator(min_samples_leaf=5,
                              max_leaf_nodes=max_leaf_nodes,
                              random_state=0)
        est.fit(X, y)
        # count how many training samples land in each node of one tree
        out = est.estimators_[0].tree_.apply(X)
        node_counts = np.bincount(out)
        # drop inner nodes
        leaf_count = node_counts[node_counts != 0]
        assert_greater(np.min(leaf_count), 4,
                       "Failed with {0}".format(name))
def test_min_samples_leaf():
    """Yield min_samples_leaf checks on float32 hastie data."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    X = X.astype(np.float32)
    for est_name in FOREST_ESTIMATORS:
        yield check_min_samples_leaf, est_name, X, y
def check_min_weight_fraction_leaf(name, X, y):
    """Leaves must carry at least min_weight_fraction_leaf of total weight."""
    # Test if leaves contain at least min_weight_fraction_leaf of the
    # training set
    ForestEstimator = FOREST_ESTIMATORS[name]
    rng = np.random.RandomState(0)
    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)
    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes in (None, 1000):
        for frac in np.linspace(0, 0.5, 6):
            est = ForestEstimator(min_weight_fraction_leaf=frac,
                                  max_leaf_nodes=max_leaf_nodes,
                                  random_state=0)
            if isinstance(est, (RandomForestClassifier,
                                RandomForestRegressor)):
                # disable bootstrap so every tree sees the full weight vector
                est.bootstrap = False
            est.fit(X, y, sample_weight=weights)
            # total weight that reaches each node of the first tree
            out = est.estimators_[0].tree_.apply(X)
            node_weights = np.bincount(out, weights=weights)
            # drop inner nodes
            leaf_weights = node_weights[node_weights != 0]
            assert_greater_equal(
                np.min(leaf_weights),
                total_weight * est.min_weight_fraction_leaf,
                "Failed with {0} "
                "min_weight_fraction_leaf={1}".format(
                    name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
    """Yield min_weight_fraction_leaf checks on float32 hastie data."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    X = X.astype(np.float32)
    for est_name in FOREST_ESTIMATORS:
        yield check_min_weight_fraction_leaf, est_name, X, y
def check_sparse_input(name, X, X_sparse, y):
    """Fitting on sparse input must give the same results as dense input."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
    sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
    # leaf assignments must agree regardless of input format
    assert_array_almost_equal(sparse.apply(X), dense.apply(X))
    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_array_almost_equal(sparse.predict(X), dense.predict(X))
        assert_array_almost_equal(sparse.feature_importances_,
                                  dense.feature_importances_)
    if name in FOREST_CLASSIFIERS:
        assert_array_almost_equal(sparse.predict_proba(X),
                                  dense.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  dense.predict_log_proba(X))
    if name in FOREST_TRANSFORMERS:
        assert_array_almost_equal(sparse.transform(X).toarray(),
                                  dense.transform(X).toarray())
        assert_array_almost_equal(sparse.fit_transform(X).toarray(),
                                  dense.fit_transform(X).toarray())
def test_sparse_input():
    """Yield dense-vs-sparse checks for every estimator and sparse format."""
    X, y = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=40)
    for est_name, sparse_matrix in product(FOREST_ESTIMATORS,
                                           (csr_matrix, csc_matrix,
                                            coo_matrix)):
        yield check_sparse_input, est_name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
    """Fit/predict must work no matter the input's memory layout or dtype."""
    # Check that it works no matter the memory layout
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    # Nothing
    X = np.asarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # C-order
    X = np.asarray(iris.data, order="C", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # F-order
    X = np.asarray(iris.data, order="F", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # Contiguous
    X = np.ascontiguousarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # sparse formats only when the underlying splitter supports them
    if est.base_estimator.splitter in SPARSE_SPLITTERS:
        # csr matrix
        X = csr_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # csc_matrix
        X = csc_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # coo_matrix
        X = coo_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
    # Strided
    X = np.asarray(iris.data[::3], dtype=dtype)
    y = iris.target[::3]
    assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
    """Yield memory-layout checks for both float dtypes."""
    dtypes = [np.float64, np.float32]
    for est_name, dtype in product(FOREST_CLASSIFIERS, dtypes):
        yield check_memory_layout, est_name, dtype
    for est_name, dtype in product(FOREST_REGRESSORS, dtypes):
        yield check_memory_layout, est_name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
    """Fitting or predicting on 1-d input must raise ValueError."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
    # a properly-fitted estimator must also reject 1-d input at predict time
    fitted = ForestEstimator(random_state=0)
    fitted.fit(X_2d, y)
    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_raises(ValueError, fitted.predict, X)
@ignore_warnings
def test_1d_input():
    """Yield 1-d input checks (iris first feature) for every estimator."""
    X_1d = iris.data[:, 0]
    X_2d = iris.data[:, 0].reshape((-1, 1))
    for est_name in FOREST_ESTIMATORS:
        yield check_1d_input, est_name, X_1d, X_2d, iris.target
def check_class_weights(name):
    """class_weight settings must resemble equivalent sample_weights."""
    # Check class_weights resemble sample_weights behavior.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = ForestClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                          {0: 2., 1: 1., 2: 2.},
                                          {0: 1., 1: 2., 2: 2.}],
                            random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "balanced" which should also have no effect
    clf4 = ForestClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Check that sample_weight and class_weight are multiplicative
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
    """Yield the class_weight check for every forest classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weights, clf_name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    """class_weight presets must work for multi-output classification."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    y_multi = np.vstack((y, np.array(y) * 2)).T
    # 'balanced' preset
    ForestClassifier(class_weight='balanced', random_state=0).fit(X, y_multi)
    # explicit per-output weight dicts
    per_output = [{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}]
    ForestClassifier(class_weight=per_output, random_state=0).fit(X, y_multi)
    # smoke test for subsample and balanced subsample
    ForestClassifier(class_weight='balanced_subsample',
                     random_state=0).fit(X, y_multi)
    clf = ForestClassifier(class_weight='subsample', random_state=0)
    ignore_warnings(clf.fit)(X, y_multi)
def test_class_weight_balanced_and_bootstrap_multi_output():
    """Yield the multi-output class_weight check for every classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weight_balanced_and_bootstrap_multi_output, clf_name
def check_class_weight_errors(name):
    """Invalid class_weight values must raise errors / warnings."""
    # Test if class_weight raises errors and warnings when expected.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    _y = np.vstack((y, np.array(y) * 2)).T
    # Invalid preset string
    clf = ForestClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, clf.fit, X, y)
    assert_raises(ValueError, clf.fit, X, _y)
    # Warning warm_start with preset
    clf = ForestClassifier(class_weight='auto', warm_start=True,
                           random_state=0)
    assert_warns(UserWarning, clf.fit, X, y)
    assert_warns(UserWarning, clf.fit, X, _y)
    # Not a list or preset for multi-output
    clf = ForestClassifier(class_weight=1, random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
    # Incorrect length list for multi-output
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
    """Yield the class_weight error check for every forest classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weight_errors, clf_name
def check_warm_start(name, random_state=42):
    """Incremental warm-start fits must match a single cold fit."""
    # Test if fitting incrementally with warm start gives a forest of the
    # right size and the same results as a normal fit.
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]
    clf_ws = None
    # grow the forest in two steps: 5 trees, then 5 more
    for n_estimators in [5, 10]:
        if clf_ws is None:
            clf_ws = ForestEstimator(n_estimators=n_estimators,
                                     random_state=random_state,
                                     warm_start=True)
        else:
            clf_ws.set_params(n_estimators=n_estimators)
        clf_ws.fit(X, y)
        assert_equal(len(clf_ws), n_estimators)
    # reference: fit all 10 trees in one go without warm start
    clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
                                warm_start=False)
    clf_no_ws.fit(X, y)
    # both forests must have drawn the same per-tree seeds...
    assert_equal(set([tree.random_state for tree in clf_ws]),
                 set([tree.random_state for tree in clf_no_ws]))
    # ...and therefore produce identical leaf assignments
    assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
                       err_msg="Failed with {0}".format(name))
def test_warm_start():
    """Yield the warm-start growth check for each forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start, estimator_name
def check_warm_start_clear(name):
    """fit() must discard earlier trees once warm_start is switched off."""
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    estimator_cls = FOREST_ESTIMATORS[name]
    reference = estimator_cls(n_estimators=5, max_depth=1, warm_start=False,
                              random_state=1)
    reference.fit(X, y)
    # Fit once with warm_start enabled, then refit with it disabled; the old
    # ensemble must be cleared so the result equals a fresh fit.
    refitted = estimator_cls(n_estimators=5, max_depth=1, warm_start=True,
                             random_state=2)
    refitted.fit(X, y)  # inits state
    refitted.set_params(warm_start=False, random_state=1)
    refitted.fit(X, y)  # clears old state and equals reference
    assert_array_almost_equal(refitted.apply(X), reference.apply(X))
def test_warm_start_clear():
    """Yield the warm-start state-clearing check for each forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_clear, estimator_name
def check_warm_start_smaller_n_estimators(name):
    """Shrinking n_estimators between warm-start fits must raise ValueError."""
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    estimator = FOREST_ESTIMATORS[name](n_estimators=5, max_depth=1,
                                        warm_start=True)
    estimator.fit(X, y)
    # Requesting fewer trees than already fit is not supported.
    estimator.set_params(n_estimators=4)
    assert_raises(ValueError, estimator.fit, X, y)
def test_warm_start_smaller_n_estimators():
    """Yield the shrinking-n_estimators check for each forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_smaller_n_estimators, estimator_name
def check_warm_start_equal_n_estimators(name):
    """A warm-start refit with unchanged n_estimators warns and is a no-op."""
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    estimator_cls = FOREST_ESTIMATORS[name]
    reference = estimator_cls(n_estimators=5, max_depth=3, warm_start=True,
                              random_state=1)
    reference.fit(X, y)
    duplicate = estimator_cls(n_estimators=5, max_depth=3, warm_start=True,
                              random_state=1)
    duplicate.fit(X, y)
    # duplicate now equals reference.  Changing the seed and fitting again
    # must only warn and leave the forest untouched: had the trees actually
    # been refit, the new seed would have produced a different forest.
    duplicate.set_params(random_state=2)
    assert_warns(UserWarning, duplicate.fit, X, y)
    assert_array_equal(reference.apply(X), duplicate.apply(X))
def test_warm_start_equal_n_estimators():
    """Yield the equal-n_estimators warm-start check for each estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_equal_n_estimators, estimator_name
def check_warm_start_oob(name):
    # Test that the warm start computes oob score when asked.
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]
    # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
    # Reference: a single fit with oob_score enabled from the start.
    clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
                          random_state=1, bootstrap=True, oob_score=True)
    clf.fit(X, y)
    # Grow a second forest to the same size via warm_start, enabling
    # oob_score only for the second (incremental) fit.
    clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
                            random_state=1, bootstrap=True, oob_score=False)
    clf_2.fit(X, y)
    clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
    clf_2.fit(X, y)
    # The incrementally-grown forest must expose the same OOB score.
    assert_true(hasattr(clf_2, 'oob_score_'))
    assert_equal(clf.oob_score_, clf_2.oob_score_)
    # Test that oob_score is computed even if we don't need to train
    # additional trees.
    clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
                            random_state=1, bootstrap=True, oob_score=False)
    clf_3.fit(X, y)
    assert_true(not(hasattr(clf_3, 'oob_score_')))
    # Refitting with oob_score=True but no new trees still computes the score
    # (the no-new-trees refit emits a UserWarning, hence ignore_warnings).
    clf_3.set_params(oob_score=True)
    ignore_warnings(clf_3.fit)(X, y)
    assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
    """Yield the warm-start OOB check for every classifier and regressor."""
    for estimator_name in FOREST_CLASSIFIERS:
        yield check_warm_start_oob, estimator_name
    for estimator_name in FOREST_REGRESSORS:
        yield check_warm_start_oob, estimator_name
def test_dtype_convert(n_classes=15):
    """String class labels must round-trip through fit/predict unchanged."""
    forest = RandomForestClassifier(random_state=0, bootstrap=False)
    features = np.eye(n_classes)
    labels = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
    predictions = forest.fit(features, labels).predict(features)
    # classes_ must preserve the original label dtype/order, and every
    # training sample must be predicted as its own class.
    assert_array_equal(forest.classes_, labels)
    assert_array_equal(predictions, labels)
|
bsd-3-clause
|
rmcgibbo/mdtraj
|
mdtraj/formats/pdb/pdbfile.py
|
2
|
29740
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Peter Eastman, Robert McGibbon
# Contributors: Carlos Hernandez, Jason Swails
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
# Portions copyright (c) 2012 Stanford University and the Authors.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit. Those portions are Copyright 2008-2012 Stanford University
# and Peter Eastman, and distributed under the following license:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
from __future__ import print_function, division
import os
from datetime import date
import gzip
import numpy as np
import xml.etree.ElementTree as etree
from copy import copy
from mdtraj.formats.pdb.pdbstructure import PdbStructure
from mdtraj.core.topology import Topology
from mdtraj.utils import ilen, cast_indices, in_units_of, open_maybe_zipped
from mdtraj.formats.registry import FormatRegistry
from mdtraj.core import element as elem
from mdtraj.utils import six
from mdtraj import version
import warnings
if six.PY3:
from urllib.request import urlopen
from urllib.parse import urlparse
from urllib.parse import (uses_relative, uses_netloc, uses_params)
else:
from urllib2 import urlopen
from urlparse import urlparse
from urlparse import uses_relative, uses_netloc, uses_params
# Ugly hack -- we don't always issue UserWarning in Py2, but we need to in
# this module
warnings.filterwarnings('always', category=UserWarning, module=__name__)
_VALID_URLS = set(uses_relative + uses_netloc + uses_params)
_VALID_URLS.discard('')
__all__ = ['load_pdb', 'PDBTrajectoryFile']
##############################################################################
# Code
##############################################################################
def _is_url(url):
    """Return True if *url* parses with a recognised URL scheme.
    from pandas/io.common.py Copyright 2014 Pandas Developers
    Used under the BSD licence
    """
    try:
        scheme = urlparse(url).scheme
    except (AttributeError, TypeError):
        # Non-string inputs (None, ints, ...) are simply not URLs.
        return False
    return scheme in _VALID_URLS
@FormatRegistry.register_loader('.pdb')
@FormatRegistry.register_loader('.pdb.gz')
def load_pdb(filename, stride=None, atom_indices=None, frame=None,
             no_boxchk=False, standard_names=True ):
    """Load a RCSB Protein Data Bank file from disk.
    Parameters
    ----------
    filename : str
        Path to the PDB file on disk. The string could be a URL. Valid URL
        schemes include http and ftp.
    stride : int, default=None
        Only read every stride-th model from the file
    atom_indices : array_like, default=None
        If not None, then read only a subset of the atoms coordinates from the
        file. These indices are zero-based (not 1 based, as used by the PDB
        format). So if you want to load only the first atom in the file, you
        would supply ``atom_indices = np.array([0])``.
    frame : int, default=None
        Use this option to load only a single frame from a trajectory on disk.
        If frame is None, the default, the entire trajectory will be loaded.
        If supplied, ``stride`` will be ignored.
    no_boxchk : bool, default=False
        By default, a heuristic check based on the particle density will be
        performed to determine if the unit cell dimensions are absurd. If the
        particle density is >1000 atoms per nm^3, the unit cell will be
        discarded. This is done because all PDB files from RCSB contain a CRYST1
        record, even if there are no periodic boundaries, and dummy values are
        filled in instead. This check will filter out those false unit cells and
        avoid potential errors in geometry calculations. Set this variable to
        ``True`` in order to skip this heuristic check.
    standard_names : bool, default=True
        If True, non-standard atomnames and residuenames are standardized to conform
        with the current PDB format version. If set to false, this step is skipped.
    Returns
    -------
    trajectory : md.Trajectory
        The resulting trajectory, as an md.Trajectory object.
    Examples
    --------
    >>> import mdtraj as md
    >>> pdb = md.load_pdb('2EQQ.pdb')
    >>> print(pdb)
    <mdtraj.Trajectory with 20 frames, 423 atoms at 0x110740a90>
    See Also
    --------
    mdtraj.PDBTrajectoryFile : Low level interface to PDB files
    """
    # Imported here rather than at module level, presumably to avoid a
    # circular import between mdtraj and this module -- TODO confirm.
    from mdtraj import Trajectory
    if not isinstance(filename, six.string_types):
        raise TypeError('filename must be of type string for load_pdb. '
                        'you supplied %s' % type(filename))
    atom_indices = cast_indices(atom_indices)
    filename = str(filename)
    with PDBTrajectoryFile(filename, standard_names=standard_names) as f:
        atom_slice = slice(None) if atom_indices is None else atom_indices
        if frame is not None:
            # Indexing with [[frame]] keeps a length-1 leading (frame) axis.
            coords = f.positions[[frame], atom_slice, :]
        else:
            coords = f.positions[::stride, atom_slice, :]
        assert coords.ndim == 3, 'internal shape error'
        n_frames = len(coords)
        topology = f.topology
        if atom_indices is not None:
            # Restrict the topology to the same subset of atoms.
            topology = topology.subset(atom_indices)
        if f.unitcell_angles is not None and f.unitcell_lengths is not None:
            # A PDB file stores a single unit cell; replicate it per frame.
            unitcell_lengths = np.array([f.unitcell_lengths] * n_frames)
            unitcell_angles = np.array([f.unitcell_angles] * n_frames)
        else:
            unitcell_lengths = None
            unitcell_angles = None
        # Convert from the file's angstroms to mdtraj's internal unit, in place.
        in_units_of(coords, f.distance_unit, Trajectory._distance_unit, inplace=True)
        in_units_of(unitcell_lengths, f.distance_unit, Trajectory._distance_unit, inplace=True)
        # PDB files carry no time information; synthesize frame indices.
        time = np.arange(len(coords))
        if frame is not None:
            # NOTE(review): len(coords) == 1 here, so `time` is [0] and
            # multiplying by `frame` leaves it [0]; `time = np.array([frame])`
            # may have been intended -- verify against callers.
            time *= frame
        elif stride is not None:
            time *= stride
        traj = Trajectory(xyz=coords, time=time, topology=topology,
                          unitcell_lengths=unitcell_lengths,
                          unitcell_angles=unitcell_angles)
        if not no_boxchk and traj.unitcell_lengths is not None:
            # Only one CRYST1 record is allowed, so only do this check for the first
            # frame. Some RCSB PDB files do not *really* have a unit cell, but still
            # have a CRYST1 record with a dummy definition. These boxes are usually
            # tiny (e.g., 1 A^3), so check that the particle density in the unit
            # cell is not absurdly high. Standard water density is ~55 M, which
            # yields a particle density ~100 atoms per cubic nm. It should be safe
            # to say that no particle density should exceed 10x that.
            particle_density = traj.top.n_atoms / traj.unitcell_volumes[0]
            if particle_density > 1000:
                warnings.warn('Unlikely unit cell vectors detected in PDB file likely '
                              'resulting from a dummy CRYST1 record. Discarding unit '
                              'cell vectors.', category=UserWarning)
                traj._unitcell_lengths = traj._unitcell_angles = None
    return traj
@FormatRegistry.register_fileobject('.pdb')
@FormatRegistry.register_fileobject('.pdb.gz')
class PDBTrajectoryFile(object):
"""Interface for reading and writing Protein Data Bank (PDB) files
Parameters
----------
filename : str
The filename to open. A path to a file on disk.
mode : {'r', 'w'}
The mode in which to open the file, either 'r' for read or 'w' for write.
force_overwrite : bool
If opened in write mode, and a file by the name of `filename` already
exists on disk, should we overwrite it?
standard_names : bool, default=True
If True, non-standard atomnames and residuenames are standardized to conform
with the current PDB format version. If set to false, this step is skipped.
Attributes
----------
positions : np.ndarray, shape=(n_frames, n_atoms, 3)
topology : mdtraj.Topology
closed : bool
Notes
-----
When writing pdb files, mdtraj follows the PDB3.0 standard as closely as
possible. During *reading* however, we try to be more lenient. For instance,
we will parse common nonstandard atom names during reading, and convert them
into the standard names. The replacement table used by mdtraj is at
{mdtraj_source}/formats/pdb/data/pdbNames.xml.
See Also
--------
mdtraj.load_pdb : High-level wrapper that returns a ``md.Trajectory``
"""
distance_unit = 'angstroms'
_residueNameReplacements = {}
_atomNameReplacements = {}
_chain_names = [chr(ord('A') + i) for i in range(26)]
def __init__(self, filename, mode='r', force_overwrite=True, standard_names=True):
self._open = False
self._file = None
self._topology = None
self._positions = None
self._mode = mode
self._last_topology = None
self._standard_names = standard_names
if mode == 'r':
PDBTrajectoryFile._loadNameReplacementTables()
if _is_url(filename):
self._file = urlopen(filename)
if filename.lower().endswith('.gz'):
if six.PY3:
self._file = gzip.GzipFile(fileobj=self._file)
else:
self._file = gzip.GzipFile(fileobj=six.StringIO(
self._file.read()))
if six.PY3:
self._file = six.StringIO(self._file.read().decode('utf-8'))
else:
self._file = open_maybe_zipped(filename, 'r')
self._read_models()
elif mode == 'w':
self._header_written = False
self._footer_written = False
self._file = open_maybe_zipped(filename, 'w', force_overwrite)
else:
raise ValueError("invalid mode: %s" % mode)
self._open = True
def write(self, positions, topology, modelIndex=None, unitcell_lengths=None,
unitcell_angles=None, bfactors=None):
"""Write a PDB file to disk
Parameters
----------
positions : array_like
The list of atomic positions to write.
topology : mdtraj.Topology
The Topology defining the model to write.
modelIndex : {int, None}
If not None, the model will be surrounded by MODEL/ENDMDL records
with this index
unitcell_lengths : {tuple, None}
Lengths of the three unit cell vectors, or None for a non-periodic system
unitcell_angles : {tuple, None}
Angles between the three unit cell vectors, or None for a non-periodic system
bfactors : array_like, default=None, shape=(n_atoms,)
Save bfactors with pdb file. Should contain a single number for
each atom in the topology
"""
if not self._mode == 'w':
raise ValueError('file not opened for writing')
if not self._header_written:
self._write_header(unitcell_lengths, unitcell_angles)
self._header_written = True
if ilen(topology.atoms) != len(positions):
raise ValueError('The number of positions must match the number of atoms')
if np.any(np.isnan(positions)):
raise ValueError('Particle position is NaN')
if np.any(np.isinf(positions)):
raise ValueError('Particle position is infinite')
self._last_topology = topology # Hack to save the topology of the last frame written, allows us to output CONECT entries in write_footer()
if bfactors is None:
bfactors = ['{0:5.2f}'.format(0.0)] * len(positions)
else:
if (np.max(bfactors) >= 100) or (np.min(bfactors) <= -10):
raise ValueError("bfactors must be in (-10, 100)")
bfactors = ['{0:5.2f}'.format(b) for b in bfactors]
atomIndex = 1
posIndex = 0
if modelIndex is not None:
print("MODEL %4d" % modelIndex, file=self._file)
for (chainIndex, chain) in enumerate(topology.chains):
chainName = self._chain_names[chainIndex % len(self._chain_names)]
residues = list(chain.residues)
for (resIndex, res) in enumerate(residues):
if len(res.name) > 3:
resName = res.name[:3]
else:
resName = res.name
for atom in res.atoms:
if len(atom.name) < 4 and atom.name[:1].isalpha() and (atom.element is None or len(atom.element.symbol) < 2):
atomName = ' '+atom.name
elif len(atom.name) > 4:
atomName = atom.name[:4]
else:
atomName = atom.name
coords = positions[posIndex]
if atom.element is not None:
symbol = atom.element.symbol
else:
symbol = ' '
line = "ATOM %5d %-4s %3s %1s%4d %s%s%s 1.00 %5s %-4s%2s " % ( # Right-justify atom symbol
atomIndex % 100000, atomName, resName, chainName,
(res.resSeq) % 10000, _format_83(coords[0]),
_format_83(coords[1]), _format_83(coords[2]),
bfactors[posIndex], atom.segment_id[:4], symbol[-2:])
assert len(line) == 80, 'Fixed width overflow detected'
print(line, file=self._file)
posIndex += 1
atomIndex += 1
if resIndex == len(residues)-1:
print("TER %5d %3s %s%4d" % (atomIndex, resName, chainName, res.resSeq), file=self._file)
atomIndex += 1
if modelIndex is not None:
print("ENDMDL", file=self._file)
def _write_header(self, unitcell_lengths, unitcell_angles, write_metadata=True):
"""Write out the header for a PDB file.
Parameters
----------
unitcell_lengths : {tuple, None}
The lengths of the three unitcell vectors, ``a``, ``b``, ``c``
unitcell_angles : {tuple, None}
The angles between the three unitcell vectors, ``alpha``,
``beta``, ``gamma``
"""
if not self._mode == 'w':
raise ValueError('file not opened for writing')
if unitcell_lengths is None and unitcell_angles is None:
return
if unitcell_lengths is not None and unitcell_angles is not None:
if not len(unitcell_lengths) == 3:
raise ValueError('unitcell_lengths must be length 3')
if not len(unitcell_angles) == 3:
raise ValueError('unitcell_angles must be length 3')
else:
raise ValueError('either unitcell_lengths and unitcell_angles'
'should both be spefied, or neither')
box = list(unitcell_lengths) + list(unitcell_angles)
assert len(box) == 6
if write_metadata:
print("REMARK 1 CREATED WITH MDTraj %s, %s" % (version.version, str(date.today())), file=self._file)
print("CRYST1%9.3f%9.3f%9.3f%7.2f%7.2f%7.2f P 1 1 " % tuple(box), file=self._file)
def _write_footer(self):
if not self._mode == 'w':
raise ValueError('file not opened for writing')
# Identify bonds that should be listed as CONECT records.
standardResidues = ['ALA', 'ASN', 'CYS', 'GLU', 'HIS', 'LEU', 'MET', 'PRO', 'THR', 'TYR',
'ARG', 'ASP', 'GLN', 'GLY', 'ILE', 'LYS', 'PHE', 'SER', 'TRP', 'VAL',
'A', 'G', 'C', 'U', 'I', 'DA', 'DG', 'DC', 'DT', 'DI', 'HOH']
conectBonds = []
if self._last_topology is not None:
for atom1, atom2 in self._last_topology.bonds:
if atom1.residue.name not in standardResidues or atom2.residue.name not in standardResidues:
conectBonds.append((atom1, atom2))
elif atom1.name == 'SG' and atom2.name == 'SG' and atom1.residue.name == 'CYS' and atom2.residue.name == 'CYS':
conectBonds.append((atom1, atom2))
if len(conectBonds) > 0:
# Work out the index used in the PDB file for each atom.
atomIndex = {}
nextAtomIndex = 0
prevChain = None
for chain in self._last_topology.chains:
for atom in chain.atoms:
if atom.residue.chain != prevChain:
nextAtomIndex += 1
prevChain = atom.residue.chain
atomIndex[atom] = nextAtomIndex
nextAtomIndex += 1
# Record which other atoms each atom is bonded to.
atomBonds = {}
for atom1, atom2 in conectBonds:
index1 = atomIndex[atom1]
index2 = atomIndex[atom2]
if index1 not in atomBonds:
atomBonds[index1] = []
if index2 not in atomBonds:
atomBonds[index2] = []
atomBonds[index1].append(index2)
atomBonds[index2].append(index1)
# Write the CONECT records.
for index1 in sorted(atomBonds):
bonded = atomBonds[index1]
while len(bonded) > 4:
print("CONECT%5d%5d%5d%5d" % (index1, bonded[0], bonded[1], bonded[2]), file=self._file)
del bonded[:4]
line = "CONECT%5d" % index1
for index2 in bonded:
line = "%s%5d" % (line, index2)
print(line, file=self._file)
print("END", file=self._file)
self._footer_written = True
@classmethod
def set_chain_names(cls, values):
"""Set the cycle of chain names used when writing PDB files
When writing PDB files, PDBTrajectoryFile translates each chain's
index into a name -- the name is what's written in the file. By
default, chains are named with the letters A-Z.
Parameters
----------
values : list
A list of chacters (strings of length 1) that the PDB writer will
cycle through to choose chain names.
"""
for item in values:
if not isinstance(item, six.string_types) and len(item) == 1:
raise TypeError('Names must be a single character string')
cls._chain_names = values
@property
def positions(self):
"""The cartesian coordinates of all of the atoms in each frame. Available when a file is opened in mode='r'
"""
return self._positions
@property
def topology(self):
"""The topology from this PDB file. Available when a file is opened in mode='r'
"""
return self._topology
@property
def unitcell_lengths(self):
"The unitcell lengths (3-tuple) in this PDB file. May be None"
return self._unitcell_lengths
@property
def unitcell_angles(self):
"The unitcell angles (3-tuple) in this PDB file. May be None"
return self._unitcell_angles
@property
def closed(self):
"Whether the file is closed"
return not self._open
def close(self):
"Close the PDB file"
if self._mode == 'w' and not self._footer_written:
self._write_footer()
if self._open:
self._file.close()
self._open = False
def _read_models(self):
if not self._mode == 'r':
raise ValueError('file not opened for reading')
self._topology = Topology()
pdb = PdbStructure(self._file, load_all_models=True)
atomByNumber = {}
for chain in pdb.iter_chains():
c = self._topology.add_chain()
for residue in chain.iter_residues():
resName = residue.get_name()
if resName in PDBTrajectoryFile._residueNameReplacements and self._standard_names:
resName = PDBTrajectoryFile._residueNameReplacements[resName]
r = self._topology.add_residue(resName, c, residue.number, residue.segment_id)
if resName in PDBTrajectoryFile._atomNameReplacements and self._standard_names:
atomReplacements = PDBTrajectoryFile._atomNameReplacements[resName]
else:
atomReplacements = {}
for atom in residue.atoms:
atomName = atom.get_name()
if atomName in atomReplacements:
atomName = atomReplacements[atomName]
atomName = atomName.strip()
element = atom.element
if element is None:
element = PDBTrajectoryFile._guess_element(atomName, residue.name, len(residue))
newAtom = self._topology.add_atom(atomName, element, r, serial=atom.serial_number)
atomByNumber[atom.serial_number] = newAtom
# load all of the positions (from every model)
_positions = []
for model in pdb.iter_models(use_all_models=True):
coords = []
for chain in model.iter_chains():
for residue in chain.iter_residues():
for atom in residue.atoms:
coords.append(atom.get_position())
_positions.append(coords)
if not all(len(f) == len(_positions[0]) for f in _positions):
raise ValueError('PDB Error: All MODELs must contain the same number of ATOMs')
self._positions = np.array(_positions)
## The atom positions read from the PDB file
self._unitcell_lengths = pdb.get_unit_cell_lengths()
self._unitcell_angles = pdb.get_unit_cell_angles()
self._topology.create_standard_bonds()
self._topology.create_disulfide_bonds(self.positions[0])
# Add bonds based on CONECT records.
connectBonds = []
for connect in pdb.models[-1].connects:
i = connect[0]
for j in connect[1:]:
if i in atomByNumber and j in atomByNumber:
connectBonds.append((atomByNumber[i], atomByNumber[j]))
if len(connectBonds) > 0:
# Only add bonds that don't already exist.
existingBonds = set(self._topology.bonds)
for bond in connectBonds:
if bond not in existingBonds and (bond[1], bond[0]) not in existingBonds:
self._topology.add_bond(bond[0], bond[1])
existingBonds.add(bond)
@staticmethod
def _loadNameReplacementTables():
"""Load the list of atom and residue name replacements."""
if len(PDBTrajectoryFile._residueNameReplacements) == 0:
tree = etree.parse(os.path.join(os.path.dirname(__file__), 'data', 'pdbNames.xml'))
allResidues = {}
proteinResidues = {}
nucleicAcidResidues = {}
for residue in tree.getroot().findall('Residue'):
name = residue.attrib['name']
if name == 'All':
PDBTrajectoryFile._parseResidueAtoms(residue, allResidues)
elif name == 'Protein':
PDBTrajectoryFile._parseResidueAtoms(residue, proteinResidues)
elif name == 'Nucleic':
PDBTrajectoryFile._parseResidueAtoms(residue, nucleicAcidResidues)
for atom in allResidues:
proteinResidues[atom] = allResidues[atom]
nucleicAcidResidues[atom] = allResidues[atom]
for residue in tree.getroot().findall('Residue'):
name = residue.attrib['name']
for id in residue.attrib:
if id == 'name' or id.startswith('alt'):
PDBTrajectoryFile._residueNameReplacements[residue.attrib[id]] = name
if 'type' not in residue.attrib:
atoms = copy(allResidues)
elif residue.attrib['type'] == 'Protein':
atoms = copy(proteinResidues)
elif residue.attrib['type'] == 'Nucleic':
atoms = copy(nucleicAcidResidues)
else:
atoms = copy(allResidues)
PDBTrajectoryFile._parseResidueAtoms(residue, atoms)
PDBTrajectoryFile._atomNameReplacements[name] = atoms
@staticmethod
def _guess_element(atom_name, residue_name, residue_length):
"Try to guess the element name"
upper = atom_name.upper()
if upper.startswith('CL'):
element = elem.chlorine
elif upper.startswith('NA'):
element = elem.sodium
elif upper.startswith('MG'):
element = elem.magnesium
elif upper.startswith('BE'):
element = elem.beryllium
elif upper.startswith('LI'):
element = elem.lithium
elif upper.startswith('K'):
element = elem.potassium
elif upper.startswith('ZN'):
element = elem.zinc
elif residue_length == 1 and upper.startswith('CA'):
element = elem.calcium
# TJL has edited this. There are a few issues here. First,
# parsing for the element is non-trivial, so I do my best
# below. Second, there is additional parsing code in
# pdbstructure.py, and I am unsure why it doesn't get used
# here...
elif residue_length > 1 and upper.startswith('CE'):
element = elem.carbon # (probably) not Celenium...
elif residue_length > 1 and upper.startswith('CD'):
element = elem.carbon # (probably) not Cadmium...
elif residue_name in ['TRP', 'ARG', 'GLN', 'HIS'] and upper.startswith('NE'):
element = elem.nitrogen # (probably) not Neon...
elif residue_name in ['ASN'] and upper.startswith('ND'):
element = elem.nitrogen # (probably) not ND...
elif residue_name == 'CYS' and upper.startswith('SG'):
element = elem.sulfur # (probably) not SG...
else:
try:
element = elem.get_by_symbol(atom_name[0])
except KeyError:
try:
symbol = atom_name[0:2].strip().rstrip("AB0123456789").lstrip("0123456789")
element = elem.get_by_symbol(symbol)
except KeyError:
element = None
return element
@staticmethod
def _parseResidueAtoms(residue, map):
for atom in residue.findall('Atom'):
name = atom.attrib['name']
for id in atom.attrib:
map[atom.attrib[id]] = name
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def __len__(self):
"Number of frames in the file"
if str(self._mode) != 'r':
raise NotImplementedError('len() only available in mode="r" currently')
if not self._open:
raise ValueError('I/O operation on closed file')
return len(self._positions)
def _format_83(f):
"""Format a single float into a string of width 8, with ideally 3 decimal
places of precision. If the number is a little too large, we can
gracefully degrade the precision by lopping off some of the decimal
places. If it's much too large, we throw a ValueError"""
if -999.999 < f < 9999.999:
return '%8.3f' % f
if -9999999 < f < 99999999:
return ('%8.3f' % f)[:8]
raise ValueError('coordinate "%s" could not be represnted '
'in a width-8 field' % f)
|
lgpl-2.1
|
pnedunuri/scipy
|
doc/source/tutorial/examples/normdiscr_plot1.py
|
84
|
1547
|
# Build a discrete approximation of a truncated normal distribution on an
# integer grid and compare its pmf against the histogram of a random sample.
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
# NOTE(review): true division -- float under Python 3 (this example predates
# Py3); np.arange below still produces the intended grid, but verify.
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4 #bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2, 1) #integer grid
gridlimitsnorm = (grid-0.5) / npointsh * nbound #bin limits for the truncnorm
gridlimits = grid - 0.5  # histogram bin edges centered on the grid points
grid = grid[:-1]
# Probability mass per grid point: CDF differences at consecutive bin edges.
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
    values=(gridint, np.round(probs, decimals=7)),
    name='normdiscrete')
n_sample = 500
np.random.seed(87655678) #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd=rvs  # alias kept from the original example; unused below
f,l = np.histogram(rvs, bins=gridlimits)
# Columns: grid point, observed count, expected count.
sfreq = np.vstack([gridint, f, probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)  # observed relative frequencies
ft = sfreq[:,2] / float(n_sample)  # theoretical probabilities
nd_std = np.sqrt(normdiscrete.stats(moments='v'))  # std dev of the discrete dist
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
# Overlay the continuous normal pdf with the matching standard deviation.
normline = plt.plot(ind+width/2.0, stats.norm.pdf(ind, scale=nd_std),
                    color='b')
plt.ylabel('Frequency')
plt.title('Frequency and Probability of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
|
bsd-3-clause
|
jazcollins/models
|
cognitive_mapping_and_planning/tfcode/cmp.py
|
14
|
24415
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code for setting up the network for CMP.
Sets up the mapper and the planner.
"""
import sys, os, numpy as np
import matplotlib.pyplot as plt
import copy
import argparse, pprint
import time
import tensorflow as tf
from tensorflow.contrib import slim
from tensorflow.contrib.slim import arg_scope
import logging
from tensorflow.python.platform import app
from tensorflow.python.platform import flags
from src import utils
import src.file_utils as fu
import tfcode.nav_utils as nu
import tfcode.cmp_utils as cu
import tfcode.cmp_summary as cmp_s
from tfcode import tf_utils
value_iteration_network = cu.value_iteration_network
rotate_preds = cu.rotate_preds
deconv = cu.deconv
get_visual_frustum = cu.get_visual_frustum
fr_v2 = cu.fr_v2
setup_train_step_kwargs = nu.default_train_step_kwargs
compute_losses_multi_or = nu.compute_losses_multi_or
get_repr_from_image = nu.get_repr_from_image
_save_d_at_t = nu.save_d_at_t
_save_all = nu.save_all
_eval_ap = nu.eval_ap
_eval_dist = nu.eval_dist
_plot_trajectories = nu.plot_trajectories
_vis_readout_maps = cmp_s._vis_readout_maps
_vis = cmp_s._vis
_summary_vis = cmp_s._summary_vis
_summary_readout_maps = cmp_s._summary_readout_maps
_add_summaries = cmp_s._add_summaries
def _inputs(problem):
  """Declare the input placeholders for the CMP graph.

  Builds three groups of named inputs via tf_utils.setup_inputs:
    * common: per-episode data (full environment map, goal locations),
    * step:   per-timestep data (camera images or analytical counts,
              egocentric goal images, running map accumulators, incremental
              egomotion, and bookkeeping tensors),
    * train:  the supervised action labels, merged with common and step.

  Args:
    problem: task config; fields read here include batch_size, num_goals,
      input_type, img_{height,width,channels}, aux_delta_thetas,
      map_crop_sizes, map_channels, goal_channels, readout_maps_*,
      node_ids_dim, perturbs_dim, num_actions and outputs.readout_maps.

  Returns:
    Tuple (common_input_data, step_input_data, train_data) of dicts mapping
    input name -> tensor; train_data is a superset of the other two.
  """
  # Set up inputs.
  with tf.name_scope('inputs'):
    inputs = []
    inputs.append(('orig_maps', tf.float32,
                   (problem.batch_size, 1, None, None, 1)))
    inputs.append(('goal_loc', tf.float32,
                   (problem.batch_size, problem.num_goals, 2)))
    common_input_data, _ = tf_utils.setup_inputs(inputs)

    inputs = []
    if problem.input_type == 'vision':
      # Multiple images from an array of cameras.
      inputs.append(('imgs', tf.float32,
                     (problem.batch_size, None, len(problem.aux_delta_thetas)+1,
                      problem.img_height, problem.img_width,
                      problem.img_channels)))
    elif problem.input_type == 'analytical_counts':
      # Pre-computed egocentric occupancy counts, one tensor per map scale.
      for i in range(len(problem.map_crop_sizes)):
        inputs.append(('analytical_counts_{:d}'.format(i), tf.float32,
                       (problem.batch_size, None, problem.map_crop_sizes[i],
                        problem.map_crop_sizes[i], problem.map_channels)))

    if problem.outputs.readout_maps:
      # Ground-truth maps used to supervise the auxiliary readout head.
      for i in range(len(problem.readout_maps_crop_sizes)):
        inputs.append(('readout_maps_{:d}'.format(i), tf.float32,
                       (problem.batch_size, None,
                        problem.readout_maps_crop_sizes[i],
                        problem.readout_maps_crop_sizes[i],
                        problem.readout_maps_channels)))

    # Per-scale egocentric goal images plus the three running map
    # accumulators (sum numerator, sum denominator, max denominator) that
    # are carried across steps; names must match those used in setup_to_run.
    for i in range(len(problem.map_crop_sizes)):
      inputs.append(('ego_goal_imgs_{:d}'.format(i), tf.float32,
                     (problem.batch_size, None, problem.map_crop_sizes[i],
                      problem.map_crop_sizes[i], problem.goal_channels)))
      for s in ['sum_num', 'sum_denom', 'max_denom']:
        inputs.append(('running_'+s+'_{:d}'.format(i), tf.float32,
                       (problem.batch_size, 1, problem.map_crop_sizes[i],
                        problem.map_crop_sizes[i], problem.map_channels)))

    # Incremental egomotion between consecutive steps.
    inputs.append(('incremental_locs', tf.float32,
                   (problem.batch_size, None, 2)))
    inputs.append(('incremental_thetas', tf.float32,
                   (problem.batch_size, None, 1)))
    inputs.append(('step_number', tf.int32, (1, None, 1)))
    inputs.append(('node_ids', tf.int32, (problem.batch_size, None,
                                          problem.node_ids_dim)))
    inputs.append(('perturbs', tf.float32, (problem.batch_size, None,
                                            problem.perturbs_dim)))

    # For plotting result plots
    inputs.append(('loc_on_map', tf.float32, (problem.batch_size, None, 2)))
    inputs.append(('gt_dist_to_goal', tf.float32, (problem.batch_size, None, 1)))
    step_input_data, _ = tf_utils.setup_inputs(inputs)

    inputs = []
    inputs.append(('action', tf.int32, (problem.batch_size, None, problem.num_actions)))
    train_data, _ = tf_utils.setup_inputs(inputs)
    train_data.update(step_input_data)
    train_data.update(common_input_data)
  return common_input_data, step_input_data, train_data
def readout_general(multi_scale_belief, num_neurons, strides, layers_per_block,
                    kernel_size, batch_norm_is_training_op, wt_decay):
  """Decode readout maps from the multi-scale belief.

  Runs the (gradient-stopped) belief through a stack of transposed
  convolutions and returns both the raw logits and their sigmoid
  probabilities.
  """
  # The readout head is purely diagnostic: stop gradients so it cannot
  # influence the mapper / planner weights upstream.
  belief = tf.stop_gradient(multi_scale_belief)
  with tf.variable_scope('readout_maps_deconv'):
    logits, _ = deconv(belief, batch_norm_is_training_op,
                       wt_decay=wt_decay, neurons=num_neurons,
                       strides=strides, layers_per_block=layers_per_block,
                       kernel_size=kernel_size,
                       conv_fn=slim.conv2d_transpose, offset=0,
                       name='readout_maps_deconv')
    probs = tf.sigmoid(logits)
  return logits, probs
def running_combine(fss_logits, confs_probs, incremental_locs,
                    incremental_thetas, previous_sum_num, previous_sum_denom,
                    previous_max_denom, map_size, num_steps):
  """Fold per-step egocentric map estimates into running accumulators.

  For each of the num_steps steps, the previous accumulators are warped into
  the current egocentric frame (via rotate_preds) and then updated with the
  step's confidence-weighted free-space logits. The accumulator after every
  step is returned, stacked along axis 1.

  Returns:
    (running_sum_nums, running_sum_denoms, running_max_denoms), each
    B x num_steps x H x W x C.
  """
  # fss_logits is B x N x H x W x C
  # confs_logits is B x N x H x W x C
  # incremental_locs is B x N x 2
  # incremental_thetas is B x N x 1
  # previous_sum_num etc is B x 1 x H x W x C
  with tf.name_scope('combine_{:d}'.format(num_steps)):
    running_sum_nums_ = []; running_sum_denoms_ = [];
    running_max_denoms_ = [];

    # Unstack along the time axis so each step can be processed in order.
    fss_logits_ = tf.unstack(fss_logits, axis=1, num=num_steps)
    confs_probs_ = tf.unstack(confs_probs, axis=1, num=num_steps)
    incremental_locs_ = tf.unstack(incremental_locs, axis=1, num=num_steps)
    incremental_thetas_ = tf.unstack(incremental_thetas, axis=1, num=num_steps)

    running_sum_num = tf.unstack(previous_sum_num, axis=1, num=1)[0]
    running_sum_denom = tf.unstack(previous_sum_denom, axis=1, num=1)[0]
    running_max_denom = tf.unstack(previous_max_denom, axis=1, num=1)[0]

    for i in range(num_steps):
      # Rotate the previous running_num and running_denom
      # (element [0] of rotate_preds' return value holds the warped
      # tensors -- see cmp_utils.rotate_preds; TODO confirm).
      running_sum_num, running_sum_denom, running_max_denom = rotate_preds(
          incremental_locs_[i], incremental_thetas_[i], map_size,
          [running_sum_num, running_sum_denom, running_max_denom],
          output_valid_mask=False)[0]
      # print i, num_steps, running_sum_num.get_shape().as_list()
      # Confidence-weighted accumulation of this step's estimate.
      running_sum_num = running_sum_num + fss_logits_[i] * confs_probs_[i]
      running_sum_denom = running_sum_denom + confs_probs_[i]
      running_max_denom = tf.maximum(running_max_denom, confs_probs_[i])
      running_sum_nums_.append(running_sum_num)
      running_sum_denoms_.append(running_sum_denom)
      running_max_denoms_.append(running_max_denom)

    running_sum_nums = tf.stack(running_sum_nums_, axis=1)
    running_sum_denoms = tf.stack(running_sum_denoms_, axis=1)
    running_max_denoms = tf.stack(running_max_denoms_, axis=1)

  return running_sum_nums, running_sum_denoms, running_max_denoms
def get_map_from_images(imgs, mapper_arch, task_params, freeze_conv, wt_decay,
                        is_training, batch_norm_is_training_op, num_maps,
                        split_maps=True):
  """Map camera images to egocentric top-view map estimates (the "mapper").

  Pipeline: CNN encoder -> optional 1x1 dimension-reduction conv -> fully
  connected layers producing a top-view feature grid -> deconvolution stack
  -> (optionally) split into free-space logits and confidence maps.

  Args:
    imgs: image tensor, reshaped internally to
      (-1, img_height, img_width, img_channels).
    mapper_arch: mapper architecture config (encoder, fc/deconv sizes, ...).
    task_params: task config (image sizes, batch_size, aux_delta_thetas, ...).
    freeze_conv: whether the encoder weights are frozen.
    wt_decay: weight decay for the newly created layers.
    is_training: python bool selecting train vs. eval graph pieces.
    batch_norm_is_training_op: tensor controlling batch-norm mode at runtime.
    num_maps: number of map scales to predict.
    split_maps: if True, split the deconv output into num_maps free-space
      logit tensors and num_maps confidence tensors.

  Returns:
    utils.Foo with fields vars_to_restore, encoder_output, conv_feat,
    reshape_conv_feat, deconv_output and, when split_maps is True,
    fss_logits, confs_logits, confs_probs.
  """
  # Hit image with a resnet.
  n_views = len(task_params.aux_delta_thetas) + 1
  out = utils.Foo()

  images_reshaped = tf.reshape(imgs,
      shape=[-1, task_params.img_height,
             task_params.img_width,
             task_params.img_channels], name='re_image')

  x, out.vars_to_restore = get_repr_from_image(
      images_reshaped, task_params.modalities, task_params.data_augment,
      mapper_arch.encoder, freeze_conv, wt_decay, is_training)

  # Reshape into nice things so that these can be accumulated over time steps
  # for faster backprop.
  sh_before = x.get_shape().as_list()
  out.encoder_output = tf.reshape(x, shape=[task_params.batch_size, -1, n_views] + sh_before[1:])
  x = tf.reshape(out.encoder_output, shape=[-1] + sh_before[1:])

  # Add a layer to reduce dimensions for a fc layer.
  if mapper_arch.dim_reduce_neurons > 0:
    ks = 1; neurons = mapper_arch.dim_reduce_neurons;
    init_var = np.sqrt(2.0/(ks**2)/neurons)
    batch_norm_param = mapper_arch.batch_norm_param
    # NOTE(review): this mutates the shared mapper_arch.batch_norm_param
    # dict in place.
    batch_norm_param['is_training'] = batch_norm_is_training_op
    out.conv_feat = slim.conv2d(x, neurons, kernel_size=ks, stride=1,
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=batch_norm_param,
                                padding='SAME', scope='dim_reduce',
                                weights_regularizer=slim.l2_regularizer(wt_decay),
                                weights_initializer=tf.random_normal_initializer(stddev=init_var))
    reshape_conv_feat = slim.flatten(out.conv_feat)
    sh = reshape_conv_feat.get_shape().as_list()
    # Concatenate the features from all views of the same time step.
    out.reshape_conv_feat = tf.reshape(reshape_conv_feat, shape=[-1, sh[1]*n_views])

  with tf.variable_scope('fc'):
    # Fully connected layers to compute the representation in top-view space.
    fc_batch_norm_param = {'center': True, 'scale': True,
                           'activation_fn':tf.nn.relu,
                           'is_training': batch_norm_is_training_op}
    f = out.reshape_conv_feat
    out_neurons = (mapper_arch.fc_out_size**2)*mapper_arch.fc_out_neurons
    neurons = mapper_arch.fc_neurons + [out_neurons]
    f, _ = tf_utils.fc_network(f, neurons=neurons, wt_decay=wt_decay,
                               name='fc', offset=0,
                               batch_norm_param=fc_batch_norm_param,
                               is_training=is_training,
                               dropout_ratio=mapper_arch.fc_dropout)
    f = tf.reshape(f, shape=[-1, mapper_arch.fc_out_size,
                             mapper_arch.fc_out_size,
                             mapper_arch.fc_out_neurons], name='re_fc')

  # Use pool5 to predict the free space map via deconv layers.
  with tf.variable_scope('deconv'):
    x, outs = deconv(f, batch_norm_is_training_op, wt_decay=wt_decay,
                     neurons=mapper_arch.deconv_neurons,
                     strides=mapper_arch.deconv_strides,
                     layers_per_block=mapper_arch.deconv_layers_per_block,
                     kernel_size=mapper_arch.deconv_kernel_size,
                     conv_fn=slim.conv2d_transpose, offset=0, name='deconv')

  # Reshape x the right way.
  sh = x.get_shape().as_list()
  x = tf.reshape(x, shape=[task_params.batch_size, -1] + sh[1:])
  out.deconv_output = x

  # Separate out the map and the confidence predictions, pass the confidence
  # through a sigmoid.
  if split_maps:
    with tf.name_scope('split'):
      out_all = tf.split(value=x, axis=4, num_or_size_splits=2*num_maps)
      out.fss_logits = out_all[:num_maps]
      out.confs_logits = out_all[num_maps:]
    with tf.name_scope('sigmoid'):
      out.confs_probs = [tf.nn.sigmoid(x) for x in out.confs_logits]
  return out
def setup_to_run(m, args, is_training, batch_norm_is_training, summary_mode):
  """Assemble the full CMP graph (mapper + multi-scale planner + training).

  Populates the model container `m` in place with input tensors, the
  mapper / value-iteration planner ops, action prediction, losses, the
  training op and summaries, then returns `m`.

  Args:
    m: mutable model container (attributes are assigned onto it).
    args: full config (arch, solver, navtask, mapper_arch, summary).
    is_training: python bool, build the training version of the graph.
    batch_norm_is_training: default value for the batch-norm mode placeholder.
    summary_mode: key under which summaries are stored in m.summary_ops.

  Returns:
    m, with (among others) input_tensors, train_ops, action_logits_op,
    action_prob_op, loss_ops, train_op, summary_ops, init_op, saver_op set.
  """
  assert(args.arch.multi_scale), 'removed support for old single scale code.'
  # Set up the model.
  tf.set_random_seed(args.solver.seed)
  task_params = args.navtask.task_params

  batch_norm_is_training_op = \
      tf.placeholder_with_default(batch_norm_is_training, shape=[],
                                  name='batch_norm_is_training_op')

  # Setup the inputs
  m.input_tensors = {}
  m.train_ops = {}
  m.input_tensors['common'], m.input_tensors['step'], m.input_tensors['train'] = \
      _inputs(task_params)

  m.init_fn = None

  if task_params.input_type == 'vision':
    m.vision_ops = get_map_from_images(
        m.input_tensors['step']['imgs'], args.mapper_arch,
        task_params, args.solver.freeze_conv,
        args.solver.wt_decay, is_training, batch_norm_is_training_op,
        num_maps=len(task_params.map_crop_sizes))

    # Load variables from snapshot if needed.
    if args.solver.pretrained_path is not None:
      m.init_fn = slim.assign_from_checkpoint_fn(args.solver.pretrained_path,
                                                 m.vision_ops.vars_to_restore)

    # Set up caching of vision features if needed.
    if args.solver.freeze_conv:
      m.train_ops['step_data_cache'] = [m.vision_ops.encoder_output]
    else:
      m.train_ops['step_data_cache'] = []

    # Set up blobs that are needed for the computation in rest of the graph.
    m.ego_map_ops = m.vision_ops.fss_logits
    m.coverage_ops = m.vision_ops.confs_probs

    # Zero pad these to make them same size as what the planner expects.
    for i in range(len(m.ego_map_ops)):
      if args.mapper_arch.pad_map_with_zeros_each[i] > 0:
        paddings = np.zeros((5,2), dtype=np.int32)
        paddings[2:4,:] = args.mapper_arch.pad_map_with_zeros_each[i]
        paddings_op = tf.constant(paddings, dtype=tf.int32)
        m.ego_map_ops[i] = tf.pad(m.ego_map_ops[i], paddings=paddings_op)
        m.coverage_ops[i] = tf.pad(m.coverage_ops[i], paddings=paddings_op)

  elif task_params.input_type == 'analytical_counts':
    # Maps come from precomputed analytical counts instead of the mapper CNN;
    # coverage marks cells where any channel has a count >= 1.
    m.ego_map_ops = []; m.coverage_ops = []
    for i in range(len(task_params.map_crop_sizes)):
      ego_map_op = m.input_tensors['step']['analytical_counts_{:d}'.format(i)]
      coverage_op = tf.cast(tf.greater_equal(
          tf.reduce_max(ego_map_op, reduction_indices=[4],
                        keep_dims=True), 1), tf.float32)
      coverage_op = tf.ones_like(ego_map_op) * coverage_op
      m.ego_map_ops.append(ego_map_op)
      m.coverage_ops.append(coverage_op)
    m.train_ops['step_data_cache'] = []

  num_steps = task_params.num_steps
  num_goals = task_params.num_goals

  map_crop_size_ops = []
  for map_crop_size in task_params.map_crop_sizes:
    map_crop_size_ops.append(tf.constant(map_crop_size, dtype=tf.int32, shape=(2,)))

  with tf.name_scope('check_size'):
    # True when the step inputs carry a single time step (test-time stepping)
    # rather than the full num_steps*num_goals unroll (training).
    is_single_step = tf.equal(tf.unstack(tf.shape(m.ego_map_ops[0]), num=5)[1], 1)

  fr_ops = []; value_ops = [];
  fr_intermediate_ops = []; value_intermediate_ops = [];
  crop_value_ops = [];
  resize_crop_value_ops = [];
  confs = []; occupancys = [];

  previous_value_op = None
  updated_state = []; state_names = [];

  # Coarse-to-fine loop over map scales: accumulate the map at each scale,
  # run fR + value iteration, and feed the cropped/upsampled value map into
  # the next (finer) scale.
  for i in range(len(task_params.map_crop_sizes)):
    map_crop_size = task_params.map_crop_sizes[i]
    with tf.variable_scope('scale_{:d}'.format(i)):
      # Accumulate the map.
      fn = lambda ns: running_combine(
          m.ego_map_ops[i],
          m.coverage_ops[i],
          m.input_tensors['step']['incremental_locs'] * task_params.map_scales[i],
          m.input_tensors['step']['incremental_thetas'],
          m.input_tensors['step']['running_sum_num_{:d}'.format(i)],
          m.input_tensors['step']['running_sum_denom_{:d}'.format(i)],
          m.input_tensors['step']['running_max_denom_{:d}'.format(i)],
          map_crop_size, ns)

      running_sum_num, running_sum_denom, running_max_denom = \
          tf.cond(is_single_step, lambda: fn(1), lambda: fn(num_steps*num_goals))

      # Expose accumulators as recurrent state so the runner can carry them
      # across steps; names must match the placeholders set up in _inputs.
      updated_state += [running_sum_num, running_sum_denom, running_max_denom]
      state_names += ['running_sum_num_{:d}'.format(i),
                      'running_sum_denom_{:d}'.format(i),
                      'running_max_denom_{:d}'.format(i)]

      # Concat the accumulated map and goal
      occupancy = running_sum_num / tf.maximum(running_sum_denom, 0.001)
      conf = running_max_denom
      # print occupancy.get_shape().as_list()

      # Concat occupancy, how much occupied and goal.
      with tf.name_scope('concat'):
        sh = [-1, map_crop_size, map_crop_size, task_params.map_channels]
        occupancy = tf.reshape(occupancy, shape=sh)
        conf = tf.reshape(conf, shape=sh)

        sh = [-1, map_crop_size, map_crop_size, task_params.goal_channels]
        goal = tf.reshape(m.input_tensors['step']['ego_goal_imgs_{:d}'.format(i)], shape=sh)
        to_concat = [occupancy, conf, goal]

        if previous_value_op is not None:
          to_concat.append(previous_value_op)

        x = tf.concat(to_concat, 3)

      # Pass the map, previous rewards and the goal through a few convolutional
      # layers to get fR.
      fr_op, fr_intermediate_op = fr_v2(
          x, output_neurons=args.arch.fr_neurons,
          inside_neurons=args.arch.fr_inside_neurons,
          is_training=batch_norm_is_training_op, name='fr',
          wt_decay=args.solver.wt_decay, stride=args.arch.fr_stride)

      # Do Value Iteration on the fR
      if args.arch.vin_num_iters > 0:
        value_op, value_intermediate_op = value_iteration_network(
            fr_op, num_iters=args.arch.vin_num_iters,
            val_neurons=args.arch.vin_val_neurons,
            action_neurons=args.arch.vin_action_neurons,
            kernel_size=args.arch.vin_ks, share_wts=args.arch.vin_share_wts,
            name='vin', wt_decay=args.solver.wt_decay)
      else:
        value_op = fr_op
        value_intermediate_op = []

      # Crop out and upsample the previous value map.
      remove = args.arch.crop_remove_each
      if remove > 0:
        crop_value_op = value_op[:, remove:-remove, remove:-remove,:]
      else:
        crop_value_op = value_op
      crop_value_op = tf.reshape(crop_value_op, shape=[-1, args.arch.value_crop_size,
                                                       args.arch.value_crop_size,
                                                       args.arch.vin_val_neurons])
      if i < len(task_params.map_crop_sizes)-1:
        # Reshape it to shape of the next scale.
        previous_value_op = tf.image.resize_bilinear(crop_value_op,
                                                     map_crop_size_ops[i+1],
                                                     align_corners=True)
        resize_crop_value_ops.append(previous_value_op)

      occupancys.append(occupancy)
      confs.append(conf)
      value_ops.append(value_op)
      crop_value_ops.append(crop_value_op)
      fr_ops.append(fr_op)
      fr_intermediate_ops.append(fr_intermediate_op)

  m.value_ops = value_ops
  m.value_intermediate_ops = value_intermediate_ops
  m.fr_ops = fr_ops
  m.fr_intermediate_ops = fr_intermediate_ops
  # crop_value_op from the last (finest) scale feeds the action predictor.
  m.final_value_op = crop_value_op
  m.crop_value_ops = crop_value_ops
  m.resize_crop_value_ops = resize_crop_value_ops
  m.confs = confs
  m.occupancys = occupancys

  sh = [-1, args.arch.vin_val_neurons*((args.arch.value_crop_size)**2)]
  m.value_features_op = tf.reshape(m.final_value_op, sh, name='reshape_value_op')

  # Determine what action to take.
  with tf.variable_scope('action_pred'):
    batch_norm_param = args.arch.pred_batch_norm_param
    if batch_norm_param is not None:
      batch_norm_param['is_training'] = batch_norm_is_training_op
    m.action_logits_op, _ = tf_utils.fc_network(
        m.value_features_op, neurons=args.arch.pred_neurons,
        wt_decay=args.solver.wt_decay, name='pred', offset=0,
        num_pred=task_params.num_actions,
        batch_norm_param=batch_norm_param)
    m.action_prob_op = tf.nn.softmax(m.action_logits_op)

  # map_crop_size here is the last loop value, i.e. the finest scale; all
  # recurrent state tensors are initialized with zeros of that spatial size.
  init_state = tf.constant(0., dtype=tf.float32, shape=[
      task_params.batch_size, 1, map_crop_size, map_crop_size,
      task_params.map_channels])

  m.train_ops['state_names'] = state_names
  m.train_ops['updated_state'] = updated_state
  m.train_ops['init_state'] = [init_state for _ in updated_state]

  m.train_ops['step'] = m.action_prob_op
  m.train_ops['common'] = [m.input_tensors['common']['orig_maps'],
                           m.input_tensors['common']['goal_loc']]
  m.train_ops['batch_norm_is_training_op'] = batch_norm_is_training_op
  m.loss_ops = []; m.loss_ops_names = [];

  if args.arch.readout_maps:
    # Auxiliary readout head: decode world maps from the accumulated belief
    # and supervise with the ground-truth readout maps.
    with tf.name_scope('readout_maps'):
      all_occupancys = tf.concat(m.occupancys + m.confs, 3)
      readout_maps, probs = readout_general(
          all_occupancys, num_neurons=args.arch.rom_arch.num_neurons,
          strides=args.arch.rom_arch.strides,
          layers_per_block=args.arch.rom_arch.layers_per_block,
          kernel_size=args.arch.rom_arch.kernel_size,
          batch_norm_is_training_op=batch_norm_is_training_op,
          wt_decay=args.solver.wt_decay)

      gt_ego_maps = [m.input_tensors['step']['readout_maps_{:d}'.format(i)]
                     for i in range(len(task_params.readout_maps_crop_sizes))]
      m.readout_maps_gt = tf.concat(gt_ego_maps, 4)
      gt_shape = tf.shape(m.readout_maps_gt)
      m.readout_maps_logits = tf.reshape(readout_maps, gt_shape)
      m.readout_maps_probs = tf.reshape(probs, gt_shape)

      # Add a loss op
      m.readout_maps_loss_op = tf.losses.sigmoid_cross_entropy(
          tf.reshape(m.readout_maps_gt, [-1, len(task_params.readout_maps_crop_sizes)]),
          tf.reshape(readout_maps, [-1, len(task_params.readout_maps_crop_sizes)]),
          scope='loss')
      # Fixed weight of 10 on the readout loss relative to the action loss.
      m.readout_maps_loss_op = 10.*m.readout_maps_loss_op

  ewma_decay = 0.99 if is_training else 0.0
  weight = tf.ones_like(m.input_tensors['train']['action'], dtype=tf.float32,
                        name='weight')

  m.reg_loss_op, m.data_loss_op, m.total_loss_op, m.acc_ops = \
      compute_losses_multi_or(m.action_logits_op,
                              m.input_tensors['train']['action'], weights=weight,
                              num_actions=task_params.num_actions,
                              data_loss_wt=args.solver.data_loss_wt,
                              reg_loss_wt=args.solver.reg_loss_wt,
                              ewma_decay=ewma_decay)

  if args.arch.readout_maps:
    m.total_loss_op = m.total_loss_op + m.readout_maps_loss_op
    m.loss_ops += [m.readout_maps_loss_op]
    m.loss_ops_names += ['readout_maps_loss']

  m.loss_ops += [m.reg_loss_op, m.data_loss_op, m.total_loss_op]
  m.loss_ops_names += ['reg_loss', 'data_loss', 'total_loss']

  if args.solver.freeze_conv:
    vars_to_optimize = list(set(tf.trainable_variables()) -
                            set(m.vision_ops.vars_to_restore))
  else:
    vars_to_optimize = None

  m.lr_op, m.global_step_op, m.train_op, m.should_stop_op, m.optimizer, \
  m.sync_optimizer = tf_utils.setup_training(
      m.total_loss_op,
      args.solver.initial_learning_rate,
      args.solver.steps_per_decay,
      args.solver.learning_rate_decay,
      args.solver.momentum,
      args.solver.max_steps,
      args.solver.sync,
      args.solver.adjust_lr_sync,
      args.solver.num_workers,
      args.solver.task,
      vars_to_optimize=vars_to_optimize,
      clip_gradient_norm=args.solver.clip_gradient_norm,
      typ=args.solver.typ, momentum2=args.solver.momentum2,
      adam_eps=args.solver.adam_eps)

  # Schedule for sampling ground-truth vs. predicted actions (DAgger-style).
  # NOTE(review): no else branch -- an unrecognized sample_gt_prob_type
  # leaves m.sample_gt_prob_op unset.
  if args.arch.sample_gt_prob_type == 'inverse_sigmoid_decay':
    m.sample_gt_prob_op = tf_utils.inverse_sigmoid_decay(args.arch.isd_k,
                                                         m.global_step_op)
  elif args.arch.sample_gt_prob_type == 'zero':
    m.sample_gt_prob_op = tf.constant(-1.0, dtype=tf.float32)
  elif args.arch.sample_gt_prob_type.split('_')[0] == 'step':
    step = int(args.arch.sample_gt_prob_type.split('_')[1])
    m.sample_gt_prob_op = tf_utils.step_gt_prob(
        step, m.input_tensors['step']['step_number'][0,0,0])

  m.sample_action_type = args.arch.action_sample_type
  m.sample_action_combine_type = args.arch.action_sample_combine_type

  m.summary_ops = {
      summary_mode: _add_summaries(m, args, summary_mode,
                                   args.summary.arop_full_summary_iters)}

  m.init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
  m.saver_op = tf.train.Saver(keep_checkpoint_every_n_hours=4,
                              write_version=tf.train.SaverDef.V2)
  return m
|
apache-2.0
|
rohit21122012/DCASE2013
|
runs/2013/dnn_layerwise/bs1024/dnn_4layer/src/evaluation.py
|
38
|
42838
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import numpy
import math
from sklearn import metrics
class DCASE2016_SceneClassification_Metrics():
    """DCASE 2016 scene classification metrics.

    Accumulates class-wise accuracies and per-class item counts over one or
    more evaluate() calls (e.g. one per cross-validation fold); results()
    returns the aggregated metrics.

    Examples
    --------
    >>> metric = DCASE2016_SceneClassification_Metrics(class_list=labels)
    >>> metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
    >>> results = metric.results()
    """

    def __init__(self, class_list):
        """__init__ method.

        Parameters
        ----------
        class_list : list
            Evaluated scene labels in the list.
        """
        # Per-fold rows are accumulated with numpy.vstack, so after the first
        # evaluate() call these are 1-D arrays and become 2-D afterwards.
        self.accuracies_per_class = None
        self.Nsys = None  # per-class counts of system output items
        self.Nref = None  # per-class counts of reference items
        self.class_list = class_list
        self.eps = numpy.spacing(1)  # guard against division by zero

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Return False so exceptions raised inside the with-block propagate.
        # (The previous implementation returned the results dict, a truthy
        # value, which silently suppressed any in-flight exception.)
        return False

    def accuracies(self, y_true, y_pred, labels):
        """Calculate per-class accuracy.

        Parameters
        ----------
        y_true : numpy.array
            Ground truth array, list of scene labels
        y_pred : numpy.array
            System output array, list of scene labels
        labels : list
            list of scene labels

        Returns
        -------
        numpy.array [shape=(number of scene labels,)]
            Accuracy per scene label class
        """
        confusion_matrix = metrics.confusion_matrix(y_true=y_true,
                                                    y_pred=y_pred,
                                                    labels=labels).astype(float)
        # Diagonal (correct counts) over row sums (reference counts per
        # class); eps avoids division by zero for absent classes.
        return numpy.divide(numpy.diag(confusion_matrix),
                            numpy.sum(confusion_matrix, 1) + self.eps)

    def evaluate(self, annotated_ground_truth, system_output):
        """Evaluate system output and annotated ground truth pair.

        Use results method to get results.

        Parameters
        ----------
        annotated_ground_truth : numpy.array
            Ground truth array, list of scene labels
        system_output : numpy.array
            System output array, list of scene labels

        Returns
        -------
        nothing
        """
        accuracies_per_class = self.accuracies(y_pred=system_output,
                                               y_true=annotated_ground_truth,
                                               labels=self.class_list)

        # Stack this fold's accuracies under the previously seen folds.
        if self.accuracies_per_class is None:
            self.accuracies_per_class = accuracies_per_class
        else:
            self.accuracies_per_class = numpy.vstack((self.accuracies_per_class,
                                                      accuracies_per_class))

        # Count per-class occurrences in system output and ground truth.
        Nref = numpy.zeros(len(self.class_list))
        Nsys = numpy.zeros(len(self.class_list))
        for class_id, class_label in enumerate(self.class_list):
            Nsys[class_id] = sum(1 for item in system_output
                                 if item == class_label)
            Nref[class_id] = sum(1 for item in annotated_ground_truth
                                 if item == class_label)

        if self.Nref is None:
            self.Nref = Nref
        else:
            self.Nref = numpy.vstack((self.Nref, Nref))

        if self.Nsys is None:
            self.Nsys = Nsys
        else:
            self.Nsys = numpy.vstack((self.Nsys, Nsys))

    def results(self):
        """Get results.

        Returns
        -------
        results : dict
            {
              'class_wise_data': {label: {'Nsys': int, 'Nref': int}, ...},
              'class_wise_accuracy': {label: float, ...},
              'overall_accuracy': float,
              'Nsys': int,
              'Nref': int,
            }
        """
        results = {
            'class_wise_data': {},
            'class_wise_accuracy': {},
            'overall_accuracy': numpy.mean(self.accuracies_per_class)
        }

        # After a single evaluate() call the accumulators are 1-D; after
        # several they are 2-D (one row per call) and must be summed/averaged
        # over the row axis.
        if len(self.Nsys.shape) == 2:
            results['Nsys'] = int(sum(sum(self.Nsys)))
            results['Nref'] = int(sum(sum(self.Nref)))
        else:
            results['Nsys'] = int(sum(self.Nsys))
            results['Nref'] = int(sum(self.Nref))

        for class_id, class_label in enumerate(self.class_list):
            if len(self.accuracies_per_class.shape) == 2:
                results['class_wise_accuracy'][class_label] = \
                    numpy.mean(self.accuracies_per_class[:, class_id])
                results['class_wise_data'][class_label] = {
                    'Nsys': int(sum(self.Nsys[:, class_id])),
                    'Nref': int(sum(self.Nref[:, class_id])),
                }
            else:
                results['class_wise_accuracy'][class_label] = \
                    numpy.mean(self.accuracies_per_class[class_id])
                results['class_wise_data'][class_label] = {
                    'Nsys': int(self.Nsys[class_id]),
                    'Nref': int(self.Nref[class_id]),
                }

        return results
class EventDetectionMetrics(object):
    """Base class for sound event detection metric classes.

    Provides shared helpers for turning event lists (lists of dicts with
    'event_label', 'event_onset' and 'event_offset' keys) into binary
    activity rolls.
    """

    def __init__(self, class_list):
        """__init__ method.

        Parameters
        ----------
        class_list : list
            List of class labels to be evaluated.
        """
        self.class_list = class_list
        self.eps = numpy.spacing(1)  # guard against division by zero

    def max_event_offset(self, data):
        """Get maximum event offset from event list.

        Parameters
        ----------
        data : list
            Event list, list of event dicts

        Returns
        -------
        float >= 0
            Maximum event offset (0 for an empty list)
        """
        # Renamed from `max` to avoid shadowing the builtin.
        max_offset = 0
        for event in data:
            if event['event_offset'] > max_offset:
                max_offset = event['event_offset']
        return max_offset

    def list_to_roll(self, data, time_resolution=0.01):
        """Convert event list into an event roll.

        The event roll is a binary matrix indicating event activity within
        time segments of length time_resolution.

        Parameters
        ----------
        data : list
            Event list, list of event dicts
        time_resolution : float > 0
            Time resolution used when converting events into the roll.

        Returns
        -------
        event_roll : numpy.ndarray
            Shape (ceil(data_length / time_resolution) + 1, len(class_list))
        """
        data_length = self.max_event_offset(data)
        # int() keeps the dimensions integral regardless of whether
        # math.ceil returns int (py3) or float (py2).
        n_segments = int(math.ceil(data_length * 1 / time_resolution)) + 1
        event_roll = numpy.zeros((n_segments, len(self.class_list)))

        # Mark each event's active segments in its class column.
        for event in data:
            pos = self.class_list.index(event['event_label'].rstrip())
            onset = int(math.floor(event['event_onset'] * 1 / time_resolution))
            offset = int(math.ceil(event['event_offset'] * 1 / time_resolution)) + 1
            event_roll[onset:offset, pos] = 1

        return event_roll
class DCASE2016_EventDetection_SegmentBasedMetrics(EventDetectionMetrics):
    """DCASE 2016 segment-based metrics for sound event detection.

    Compares the system output against the ground truth on fixed-length
    time segments (default one second).

    Supported metrics:
    - Overall: error rate (ER), substitutions (S), insertions (I),
      deletions (D), F-score (F1)
    - Class-wise: error rate (ER), insertions (I), deletions (D),
      F-score (F1)

    Examples
    --------
    >>> metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=labels)
    >>> metric.evaluate(system_output=sys_events, annotated_ground_truth=ref_events)
    >>> results = metric.results()
    """

    def __init__(self, class_list, time_resolution=1.0):
        """__init__ method.

        Parameters
        ----------
        class_list : list
            List of class labels to be evaluated.
        time_resolution : float > 0
            Time resolution used when converting event into event roll.
            (Default value = 1.0)
        """
        self.time_resolution = time_resolution

        # Segment counts accumulated over all evaluate() calls.
        self.overall = {
            'Ntp': 0.0,
            'Ntn': 0.0,
            'Nfp': 0.0,
            'Nfn': 0.0,
            'Nref': 0.0,
            'Nsys': 0.0,
            'ER': 0.0,
            'S': 0.0,
            'D': 0.0,
            'I': 0.0,
        }

        self.class_wise = {}
        for class_label in class_list:
            self.class_wise[class_label] = {
                'Ntp': 0.0,
                'Ntn': 0.0,
                'Nfp': 0.0,
                'Nfn': 0.0,
                'Nref': 0.0,
                'Nsys': 0.0,
            }

        EventDetectionMetrics.__init__(self, class_list=class_list)

    def __enter__(self):
        # Initialize class and return it
        return self

    def __exit__(self, type, value, traceback):
        # Return False so exceptions raised inside the with-block propagate.
        # (The previous implementation returned the results dict, a truthy
        # value, which silently suppressed any in-flight exception.)
        return False

    def evaluate(self, annotated_ground_truth, system_output):
        """Evaluate system output and annotated ground truth pair.

        Accumulates the counts; use the results method to get results.

        Parameters
        ----------
        annotated_ground_truth : list
            Ground truth event list (dicts with event_label, event_onset
            and event_offset keys)
        system_output : list
            System output event list (same format)

        Returns
        -------
        self
        """
        # Convert event lists into frame-based representations.
        system_event_roll = self.list_to_roll(
            data=system_output, time_resolution=self.time_resolution)
        annotated_event_roll = self.list_to_roll(
            data=annotated_ground_truth, time_resolution=self.time_resolution)

        # Pad the shorter roll with zeros so both cover the same duration.
        if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
            padding = numpy.zeros(
                (annotated_event_roll.shape[0] - system_event_roll.shape[0],
                 len(self.class_list)))
            system_event_roll = numpy.vstack((system_event_roll, padding))

        if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
            padding = numpy.zeros(
                (system_event_roll.shape[0] - annotated_event_roll.shape[0],
                 len(self.class_list)))
            annotated_event_roll = numpy.vstack((annotated_event_roll, padding))

        # Compute segment-based overall metrics.
        for segment_id in range(0, annotated_event_roll.shape[0]):
            annotated_segment = annotated_event_roll[segment_id, :]
            system_segment = system_event_roll[segment_id, :]

            Ntp = sum(system_segment + annotated_segment > 1)
            Ntn = sum(system_segment + annotated_segment == 0)
            Nfp = sum(system_segment - annotated_segment > 0)
            Nfn = sum(annotated_segment - system_segment > 0)

            Nref = sum(annotated_segment)
            Nsys = sum(system_segment)

            # Per-segment substitutions, deletions, insertions and errors.
            S = min(Nref, Nsys) - Ntp
            D = max(0, Nref - Nsys)
            I = max(0, Nsys - Nref)
            ER = max(Nref, Nsys) - Ntp

            self.overall['Ntp'] += Ntp
            self.overall['Ntn'] += Ntn
            self.overall['Nfp'] += Nfp
            self.overall['Nfn'] += Nfn
            self.overall['Nref'] += Nref
            self.overall['Nsys'] += Nsys
            self.overall['S'] += S
            self.overall['D'] += D
            self.overall['I'] += I
            self.overall['ER'] += ER

        # Accumulate class-wise counts over the whole rolls.
        for class_id, class_label in enumerate(self.class_list):
            annotated_segment = annotated_event_roll[:, class_id]
            system_segment = system_event_roll[:, class_id]

            Ntp = sum(system_segment + annotated_segment > 1)
            Ntn = sum(system_segment + annotated_segment == 0)
            Nfp = sum(system_segment - annotated_segment > 0)
            Nfn = sum(annotated_segment - system_segment > 0)

            Nref = sum(annotated_segment)
            Nsys = sum(system_segment)

            self.class_wise[class_label]['Ntp'] += Ntp
            self.class_wise[class_label]['Ntn'] += Ntn
            self.class_wise[class_label]['Nfp'] += Nfp
            self.class_wise[class_label]['Nfn'] += Nfn
            self.class_wise[class_label]['Nref'] += Nref
            self.class_wise[class_label]['Nsys'] += Nsys

        return self

    def results(self):
        """Get results.

        Returns
        -------
        results : dict
            {
              'overall': {'Pre', 'Rec', 'F', 'ER', 'S', 'D', 'I'},
              'class_wise': {label: {'Pre', 'Rec', 'F', 'ER', 'D', 'I',
                                     'Nref', 'Nsys', 'Ntp', 'Nfn', 'Nfp'}},
              'class_wise_average': {'F', 'ER'},
            }
        """
        results = {'overall': {},
                   'class_wise': {},
                   'class_wise_average': {},
                   }

        # Overall metrics. eps guards every division so that an evaluation
        # with no reference or system events does not raise (matches the
        # class-wise computation below, which already used eps).
        results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
        results['overall']['Rec'] = self.overall['Ntp'] / (self.overall['Nref'] + self.eps)
        results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
        results['overall']['ER'] = self.overall['ER'] / (self.overall['Nref'] + self.eps)
        results['overall']['S'] = self.overall['S'] / (self.overall['Nref'] + self.eps)
        results['overall']['D'] = self.overall['D'] / (self.overall['Nref'] + self.eps)
        results['overall']['I'] = self.overall['I'] / (self.overall['Nref'] + self.eps)

        # Class-wise metrics
        class_wise_F = []
        class_wise_ER = []
        for class_id, class_label in enumerate(self.class_list):
            if class_label not in results['class_wise']:
                results['class_wise'][class_label] = {}
            results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (self.class_wise[class_label]['Nsys'] + self.eps)
            results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['F'] = 2 * ((results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
            results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] + self.class_wise[class_label]['Nfp']) / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
            results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
            results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
            results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
            results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']

            class_wise_F.append(results['class_wise'][class_label]['F'])
            class_wise_ER.append(results['class_wise'][class_label]['ER'])

        results['class_wise_average']['F'] = numpy.mean(class_wise_F)
        results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)

        return results
class DCASE2016_EventDetection_EventBasedMetrics(EventDetectionMetrics):
    """DCASE2016 event-based metrics for sound event detection.

    Supported metrics:
    - Overall
        - Error rate (ER), Substitutions (S), Insertions (I), Deletions (D)
        - F-score (F1)
    - Class-wise
        - Error rate (ER), Insertions (I), Deletions (D)
        - F-score (F1)

    Usage: call ``evaluate()`` once per file pair, then ``results()`` to get
    the accumulated metrics (see the segment-based class for a full example;
    note that ``evaluate()`` returns nothing and cannot be chained).
    """

    def __init__(self, class_list, time_resolution=1.0, t_collar=0.2):
        """__init__ method.

        Parameters
        ----------
        class_list : list
            List of class labels to be evaluated.
        time_resolution : float > 0
            Time resolution used when converting event into event roll.
            (Default value = 1.0)
        t_collar : float > 0
            Time collar for event onset and offset condition.
            (Default value = 0.2)
        """
        self.time_resolution = time_resolution
        self.t_collar = t_collar

        # Accumulators over all evaluate() calls. Floats so the metric
        # divisions in results() are true divisions even on Python 2.
        self.overall = {
            'Nref': 0.0,
            'Nsys': 0.0,
            'Nsubs': 0.0,
            'Ntp': 0.0,
            'Nfp': 0.0,
            'Nfn': 0.0,
        }

        self.class_wise = {}
        for class_label in class_list:
            self.class_wise[class_label] = {
                'Nref': 0.0,
                'Nsys': 0.0,
                'Ntp': 0.0,
                'Ntn': 0.0,
                'Nfp': 0.0,
                'Nfn': 0.0,
            }

        EventDetectionMetrics.__init__(self, class_list=class_list)

    def __enter__(self):
        # Initialize class and return it
        return self

    def __exit__(self, type, value, traceback):
        # Finalize evaluation and return results
        return self.results()

    def evaluate(self, annotated_ground_truth, system_output):
        """Evaluate system output and annotated ground truth pair.

        Use results method to get results.

        Parameters
        ----------
        annotated_ground_truth : list of dict
            Ground truth events (each with 'event_label', 'event_onset',
            'event_offset').
        system_output : list of dict
            System output events in the same format.

        Returns
        -------
        nothing
        """
        # Overall metrics

        # Total number of detected and reference events
        Nsys = len(system_output)
        Nref = len(annotated_ground_truth)

        sys_correct = numpy.zeros(Nsys, dtype=bool)
        ref_correct = numpy.zeros(Nref, dtype=bool)

        # Number of correctly transcribed events, onset/offset within a
        # t_collar range. Each reference event is matched at most once.
        for j in range(0, len(annotated_ground_truth)):
            for i in range(0, len(system_output)):
                label_condition = annotated_ground_truth[j]['event_label'] == system_output[i]['event_label']
                onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
                                                       system_event=system_output[i],
                                                       t_collar=self.t_collar)
                offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
                                                         system_event=system_output[i],
                                                         t_collar=self.t_collar)
                if label_condition and onset_condition and offset_condition:
                    ref_correct[j] = True
                    sys_correct[i] = True
                    break

        Ntp = numpy.sum(sys_correct)

        # BUGFIX: numpy.negative() is arithmetic negation and raises a
        # TypeError for boolean arrays on modern NumPy (>= 1.13);
        # numpy.logical_not() is the correct element-wise inversion.
        sys_leftover = numpy.nonzero(numpy.logical_not(sys_correct))[0]
        ref_leftover = numpy.nonzero(numpy.logical_not(ref_correct))[0]

        # Substitutions: unmatched pairs that align in time but carry a
        # different label.
        Nsubs = 0
        for j in ref_leftover:
            for i in sys_leftover:
                onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
                                                       system_event=system_output[i],
                                                       t_collar=self.t_collar)
                offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
                                                         system_event=system_output[i],
                                                         t_collar=self.t_collar)
                if onset_condition and offset_condition:
                    Nsubs += 1
                    break

        Nfp = Nsys - Ntp - Nsubs
        Nfn = Nref - Ntp - Nsubs

        self.overall['Nref'] += Nref
        self.overall['Nsys'] += Nsys
        self.overall['Ntp'] += Ntp
        self.overall['Nsubs'] += Nsubs
        self.overall['Nfp'] += Nfp
        self.overall['Nfn'] += Nfn

        # Class-wise metrics
        for class_label in self.class_list:
            Nref = 0.0
            Nsys = 0.0
            Ntp = 0.0

            # Count event frequencies in the ground truth
            for i in range(0, len(annotated_ground_truth)):
                if annotated_ground_truth[i]['event_label'] == class_label:
                    Nref += 1

            # Count event frequencies in the system output
            for i in range(0, len(system_output)):
                if system_output[i]['event_label'] == class_label:
                    Nsys += 1

            for j in range(0, len(annotated_ground_truth)):
                for i in range(0, len(system_output)):
                    if annotated_ground_truth[j]['event_label'] == class_label and system_output[i]['event_label'] == class_label:
                        onset_condition = self.onset_condition(annotated_event=annotated_ground_truth[j],
                                                               system_event=system_output[i],
                                                               t_collar=self.t_collar)
                        offset_condition = self.offset_condition(annotated_event=annotated_ground_truth[j],
                                                                 system_event=system_output[i],
                                                                 t_collar=self.t_collar)
                        if onset_condition and offset_condition:
                            Ntp += 1
                            break

            Nfp = Nsys - Ntp
            Nfn = Nref - Ntp

            self.class_wise[class_label]['Nref'] += Nref
            self.class_wise[class_label]['Nsys'] += Nsys
            self.class_wise[class_label]['Ntp'] += Ntp
            self.class_wise[class_label]['Nfp'] += Nfp
            self.class_wise[class_label]['Nfn'] += Nfn

    def onset_condition(self, annotated_event, system_event, t_collar=0.200):
        """Onset condition, checked does the event pair fulfill condition.

        Condition:
        - event onsets are within t_collar each other

        Parameters
        ----------
        annotated_event : dict
            Event dict
        system_event : dict
            Event dict
        t_collar : float > 0
            Defines how close event onsets have to be in order to be
            considered match. In seconds. (Default value = 0.2)

        Returns
        -------
        result : bool
            Condition result
        """
        return math.fabs(annotated_event['event_onset'] - system_event['event_onset']) <= t_collar

    def offset_condition(self, annotated_event, system_event, t_collar=0.200, percentage_of_length=0.5):
        """Offset condition, checking does the event pair fulfill condition.

        Condition:
        - event offsets are within t_collar each other
        or
        - system event offset is within the
          percentage_of_length * annotated event_length

        Parameters
        ----------
        annotated_event : dict
            Event dict
        system_event : dict
            Event dict
        t_collar : float > 0
            Defines how close event offsets have to be in order to be
            considered match. In seconds. (Default value = 0.2)
        percentage_of_length : float [0-1]
            Fraction of the annotated event length used as an alternative
            collar for long events. (Default value = 0.5)

        Returns
        -------
        result : bool
            Condition result
        """
        annotated_length = annotated_event['event_offset'] - annotated_event['event_onset']
        return math.fabs(annotated_event['event_offset'] - system_event['event_offset']) <= max(t_collar, percentage_of_length * annotated_length)

    def results(self):
        """Get results.

        Returns
        -------
        results : dict
            Dict with 'overall' (Pre/Rec/F/ER/S/D/I), 'class_wise'
            (Pre/Rec/F/ER/D/I and raw counts per class) and
            'class_wise_average' (F/ER) sections.
        """
        results = {
            'overall': {},
            'class_wise': {},
            'class_wise_average': {},
        }

        # Overall metrics; self.eps guards against division by zero where
        # the denominator can legitimately be zero.
        results['overall']['Pre'] = self.overall['Ntp'] / (self.overall['Nsys'] + self.eps)
        results['overall']['Rec'] = self.overall['Ntp'] / self.overall['Nref']
        results['overall']['F'] = 2 * ((results['overall']['Pre'] * results['overall']['Rec']) / (results['overall']['Pre'] + results['overall']['Rec'] + self.eps))
        results['overall']['ER'] = (self.overall['Nfn'] + self.overall['Nfp'] + self.overall['Nsubs']) / self.overall['Nref']
        results['overall']['S'] = self.overall['Nsubs'] / self.overall['Nref']
        results['overall']['D'] = self.overall['Nfn'] / self.overall['Nref']
        results['overall']['I'] = self.overall['Nfp'] / self.overall['Nref']

        # Class-wise metrics
        class_wise_F = []
        class_wise_ER = []
        for class_label in self.class_list:
            if class_label not in results['class_wise']:
                results['class_wise'][class_label] = {}
            results['class_wise'][class_label]['Pre'] = self.class_wise[class_label]['Ntp'] / (self.class_wise[class_label]['Nsys'] + self.eps)
            results['class_wise'][class_label]['Rec'] = self.class_wise[class_label]['Ntp'] / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['F'] = 2 * ((results['class_wise'][class_label]['Pre'] * results['class_wise'][class_label]['Rec']) / (results['class_wise'][class_label]['Pre'] + results['class_wise'][class_label]['Rec'] + self.eps))
            results['class_wise'][class_label]['ER'] = (self.class_wise[class_label]['Nfn'] + self.class_wise[class_label]['Nfp']) / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['D'] = self.class_wise[class_label]['Nfn'] / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['I'] = self.class_wise[class_label]['Nfp'] / (self.class_wise[class_label]['Nref'] + self.eps)
            results['class_wise'][class_label]['Nref'] = self.class_wise[class_label]['Nref']
            results['class_wise'][class_label]['Nsys'] = self.class_wise[class_label]['Nsys']
            results['class_wise'][class_label]['Ntp'] = self.class_wise[class_label]['Ntp']
            results['class_wise'][class_label]['Nfn'] = self.class_wise[class_label]['Nfn']
            results['class_wise'][class_label]['Nfp'] = self.class_wise[class_label]['Nfp']
            class_wise_F.append(results['class_wise'][class_label]['F'])
            class_wise_ER.append(results['class_wise'][class_label]['ER'])

        # Class-wise average (macro average over classes)
        results['class_wise_average']['F'] = numpy.mean(class_wise_F)
        results['class_wise_average']['ER'] = numpy.mean(class_wise_ER)

        return results
class DCASE2013_EventDetection_Metrics(EventDetectionMetrics):
    """Legacy DCASE2013 metrics, converted from the provided Matlab implementation

    Supported metrics:
    - Frame based
        - F-score (F)
        - AEER
    - Event based
        - Onset
            - F-Score (F)
            - AEER
        - Onset-offset
            - F-Score (F)
            - AEER
    - Class based
        - Onset
            - F-Score (F)
            - AEER
        - Onset-offset
            - F-Score (F)
            - AEER
    """

    def frame_based(self, annotated_ground_truth, system_output, resolution=0.01):
        # Frame-based F-score and AEER: both event lists are rasterized onto
        # a fixed-resolution binary event roll and compared frame by frame.
        # Convert event list into frame-based representation
        system_event_roll = self.list_to_roll(data=system_output, time_resolution=resolution)
        annotated_event_roll = self.list_to_roll(data=annotated_ground_truth, time_resolution=resolution)

        # Fix durations of both event_rolls to be equal (zero-pad the
        # shorter one so elementwise comparisons are valid)
        if annotated_event_roll.shape[0] > system_event_roll.shape[0]:
            padding = numpy.zeros((annotated_event_roll.shape[0] - system_event_roll.shape[0], len(self.class_list)))
            system_event_roll = numpy.vstack((system_event_roll, padding))

        if system_event_roll.shape[0] > annotated_event_roll.shape[0]:
            padding = numpy.zeros((system_event_roll.shape[0] - annotated_event_roll.shape[0], len(self.class_list)))
            annotated_event_roll = numpy.vstack((annotated_event_roll, padding))

        # Compute frame-based metrics
        # (sum > 1 -> active in both rolls; diff > 0 -> active in one only)
        Nref = sum(sum(annotated_event_roll))
        Ntot = sum(sum(system_event_roll))
        Ntp = sum(sum(system_event_roll + annotated_event_roll > 1))
        Nfp = sum(sum(system_event_roll - annotated_event_roll > 0))
        Nfn = sum(sum(annotated_event_roll - system_event_roll > 0))
        Nsubs = min(Nfp, Nfn)

        eps = numpy.spacing(1)  # tiny constant guarding divisions by zero

        results = dict()
        results['Rec'] = Ntp / (Nref + eps)
        results['Pre'] = Ntp / (Ntot + eps)
        results['F'] = 2 * ((results['Pre'] * results['Rec']) / (results['Pre'] + results['Rec'] + eps))
        results['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)
        return results

    def event_based(self, annotated_ground_truth, system_output):
        # Event-based evaluation for event detection task
        # outputFile: the output of the event detection system
        # GTFile: the ground truth list of events

        # Total number of detected and reference events
        Ntot = len(system_output)
        Nref = len(annotated_ground_truth)

        # Number of correctly transcribed events, onset within a +/-100 ms range
        Ncorr = 0
        NcorrOff = 0
        for j in range(0, len(annotated_ground_truth)):
            for i in range(0, len(system_output)):
                if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (math.fabs(annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
                    Ncorr += 1

                    # If offset within a +/-100 ms range or within 50% of ground-truth event's duration
                    if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(0.1, 0.5 * (annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j]['event_onset'])):
                        NcorrOff += 1

                    break  # In order to not evaluate duplicates

        # Compute onset-only event-based metrics
        eps = numpy.spacing(1)
        results = {
            'onset': {},
            'onset-offset': {},
        }
        Nfp = Ntot - Ncorr
        Nfn = Nref - Ncorr
        Nsubs = min(Nfp, Nfn)
        results['onset']['Rec'] = Ncorr / (Nref + eps)
        results['onset']['Pre'] = Ncorr / (Ntot + eps)
        results['onset']['F'] = 2 * (
            (results['onset']['Pre'] * results['onset']['Rec']) / (
                results['onset']['Pre'] + results['onset']['Rec'] + eps))
        results['onset']['AEER'] = (Nfn + Nfp + Nsubs) / (Nref + eps)

        # Compute onset-offset event-based metrics
        NfpOff = Ntot - NcorrOff
        NfnOff = Nref - NcorrOff
        NsubsOff = min(NfpOff, NfnOff)
        results['onset-offset']['Rec'] = NcorrOff / (Nref + eps)
        results['onset-offset']['Pre'] = NcorrOff / (Ntot + eps)
        results['onset-offset']['F'] = 2 * ((results['onset-offset']['Pre'] * results['onset-offset']['Rec']) / (
            results['onset-offset']['Pre'] + results['onset-offset']['Rec'] + eps))
        results['onset-offset']['AEER'] = (NfnOff + NfpOff + NsubsOff) / (Nref + eps)
        return results

    def class_based(self, annotated_ground_truth, system_output):
        # Class-wise event-based evaluation for event detection task
        # outputFile: the output of the event detection system
        # GTFile: the ground truth list of events

        # Total number of detected and reference events per class
        Ntot = numpy.zeros((len(self.class_list), 1))
        for event in system_output:
            pos = self.class_list.index(event['event_label'])
            Ntot[pos] += 1

        Nref = numpy.zeros((len(self.class_list), 1))
        for event in annotated_ground_truth:
            pos = self.class_list.index(event['event_label'])
            Nref[pos] += 1

        I = (Nref > 0).nonzero()[0]  # index for classes present in ground-truth

        # Number of correctly transcribed events per class, onset within a +/-100 ms range
        Ncorr = numpy.zeros((len(self.class_list), 1))
        NcorrOff = numpy.zeros((len(self.class_list), 1))
        for j in range(0, len(annotated_ground_truth)):
            for i in range(0, len(system_output)):
                if annotated_ground_truth[j]['event_label'] == system_output[i]['event_label'] and (
                        math.fabs(
                            annotated_ground_truth[j]['event_onset'] - system_output[i]['event_onset']) <= 0.1):
                    pos = self.class_list.index(system_output[i]['event_label'])
                    Ncorr[pos] += 1

                    # If offset within a +/-100 ms range or within 50% of ground-truth event's duration
                    if math.fabs(annotated_ground_truth[j]['event_offset'] - system_output[i]['event_offset']) <= max(
                            0.1, 0.5 * (
                                annotated_ground_truth[j]['event_offset'] - annotated_ground_truth[j][
                                    'event_onset'])):
                        pos = self.class_list.index(system_output[i]['event_label'])
                        NcorrOff[pos] += 1

                    break  # In order to not evaluate duplicates

        # Compute onset-only class-wise event-based metrics
        # (averages are taken only over classes present in the ground truth)
        eps = numpy.spacing(1)
        results = {
            'onset': {},
            'onset-offset': {},
        }
        Nfp = Ntot - Ncorr
        Nfn = Nref - Ncorr
        Nsubs = numpy.minimum(Nfp, Nfn)
        tempRec = Ncorr[I] / (Nref[I] + eps)
        tempPre = Ncorr[I] / (Ntot[I] + eps)
        results['onset']['Rec'] = numpy.mean(tempRec)
        results['onset']['Pre'] = numpy.mean(tempPre)
        tempF = 2 * ((tempPre * tempRec) / (tempPre + tempRec + eps))
        results['onset']['F'] = numpy.mean(tempF)
        tempAEER = (Nfn[I] + Nfp[I] + Nsubs[I]) / (Nref[I] + eps)
        results['onset']['AEER'] = numpy.mean(tempAEER)

        # Compute onset-offset class-wise event-based metrics
        NfpOff = Ntot - NcorrOff
        NfnOff = Nref - NcorrOff
        NsubsOff = numpy.minimum(NfpOff, NfnOff)
        tempRecOff = NcorrOff[I] / (Nref[I] + eps)
        tempPreOff = NcorrOff[I] / (Ntot[I] + eps)
        results['onset-offset']['Rec'] = numpy.mean(tempRecOff)
        results['onset-offset']['Pre'] = numpy.mean(tempPreOff)
        tempFOff = 2 * ((tempPreOff * tempRecOff) / (tempPreOff + tempRecOff + eps))
        results['onset-offset']['F'] = numpy.mean(tempFOff)
        tempAEEROff = (NfnOff[I] + NfpOff[I] + NsubsOff[I]) / (Nref[I] + eps)
        results['onset-offset']['AEER'] = numpy.mean(tempAEEROff)
        return results
def main(argv):
    """Demonstrate metric usage with minimal example data structures.

    Parameters
    ----------
    argv : list of str
        Command-line arguments (unused).
    """
    # Examples to show usage and required data structures
    class_list = ['class1', 'class2', 'class3']
    system_output = [
        {
            'event_label': 'class1',
            'event_onset': 0.1,
            'event_offset': 1.0
        },
        {
            'event_label': 'class2',
            'event_onset': 4.1,
            'event_offset': 4.7
        },
        {
            'event_label': 'class3',
            'event_onset': 5.5,
            'event_offset': 6.7
        }
    ]
    annotated_groundtruth = [
        {
            'event_label': 'class1',
            'event_onset': 0.1,
            'event_offset': 1.0
        },
        {
            'event_label': 'class2',
            'event_onset': 4.2,
            'event_offset': 5.4
        },
        {
            'event_label': 'class3',
            'event_onset': 5.5,
            'event_offset': 6.7
        }
    ]
    dcase2013metric = DCASE2013_EventDetection_Metrics(class_list=class_list)

    # Python 2 `print` statements converted to the print() function so the
    # example runs on supported Python versions.
    print('DCASE2013')
    print('Frame-based:', dcase2013metric.frame_based(system_output=system_output,
                                                      annotated_ground_truth=annotated_groundtruth))
    print('Event-based:', dcase2013metric.event_based(system_output=system_output,
                                                      annotated_ground_truth=annotated_groundtruth))
    print('Class-based:', dcase2013metric.class_based(system_output=system_output,
                                                      annotated_ground_truth=annotated_groundtruth))

    dcase2016_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=class_list)
    print('DCASE2016')
    # BUGFIX: evaluate() returns nothing (see its docstring), so chaining
    # .results() onto it raised AttributeError; call them separately.
    dcase2016_metric.evaluate(system_output=system_output,
                              annotated_ground_truth=annotated_groundtruth)
    print(dcase2016_metric.results())


if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
mit
|
pravsripad/mne-python
|
mne/tests/test_cov.py
|
4
|
34692
|
# Author: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import itertools as itt
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
import pytest
import numpy as np
from scipy import linalg
from mne.cov import (regularize, whiten_evoked,
_auto_low_rank_model,
prepare_noise_cov, compute_whitener,
_regularized_covariance)
from mne import (read_cov, write_cov, Epochs, merge_events,
find_events, compute_raw_covariance,
compute_covariance, read_evokeds, compute_proj_raw,
pick_channels_cov, pick_types, make_ad_hoc_cov,
make_fixed_length_events, create_info, compute_rank)
from mne.channels import equalize_channels
from mne.datasets import testing
from mne.fixes import _get_args
from mne.io import read_raw_fif, RawArray, read_raw_ctf, read_info
from mne.io.pick import _DATA_CH_TYPES_SPLIT, pick_info
from mne.preprocessing import maxwell_filter
from mne.rank import _compute_rank_int
from mne.utils import requires_sklearn, catch_logging, assert_snr
# Paths to the small test recordings/covariances shipped with mne's io
# test suite; used as fixtures throughout this module.
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
cov_fname = op.join(base_dir, 'test-cov.fif')
cov_gz_fname = op.join(base_dir, 'test-cov.fif.gz')
cov_km_fname = op.join(base_dir, 'test-km-cov.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
ave_fname = op.join(base_dir, 'test-ave.fif')
erm_cov_fname = op.join(base_dir, 'test_erm-cov.fif')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
# CTF data lives in the (optionally downloaded) testing dataset.
ctf_fname = op.join(testing.data_path(download=False), 'CTF',
                    'testdata_ctf.ds')
@pytest.mark.parametrize('proj', (True, False))
@pytest.mark.parametrize('pca', (True, 'white', False))
def test_compute_whitener(proj, pca):
    """Test properties of compute_whitener.

    Checks the whitener/colorer shapes and that their product round-trips
    to (a projection of) the identity for each ``pca`` mode.
    """
    raw = read_raw_fif(raw_fname).crop(0, 3).load_data()
    raw.pick_types(meg=True, eeg=True, exclude=())
    if proj:
        raw.apply_proj()
    else:
        raw.del_proj()
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_raw_covariance(raw)
    W, _, C = compute_whitener(cov, raw.info, pca=pca, return_colorer=True,
                               verbose='error')
    n_channels = len(raw.ch_names)
    rank = n_channels - len(raw.info['projs'])
    # BUGFIX: removed a dead `n_reduced = len(raw.ch_names)` assignment that
    # was immediately overwritten by the line below.
    # With pca=True the whitener is reduced to the data rank; otherwise it
    # keeps the full channel dimension.
    n_reduced = rank if pca is True else n_channels
    assert W.shape == C.shape[::-1] == (n_reduced, n_channels)
    # round-trip mults
    round_trip = np.dot(W, C)
    if pca is True:
        assert_allclose(round_trip, np.eye(n_reduced), atol=1e-7)
    elif pca == 'white':
        # Our first few rows/cols are zeroed out in the white space
        assert_allclose(round_trip[-rank:, -rank:],
                        np.eye(rank), atol=1e-7)
    else:
        assert pca is False
        assert_allclose(round_trip, np.eye(n_channels), atol=0.05)
def test_cov_mismatch():
    """Test estimation with MEG<->Head mismatch.

    Covariance estimation across epochs instances must reject (or warn
    about, per ``on_mismatch``) inconsistent device-to-head transforms.
    """
    raw = read_raw_fif(raw_fname).crop(0, 5).load_data()
    events = find_events(raw, stim_channel='STI 014')
    raw.pick_channels(raw.ch_names[:5])
    raw.add_proj([], remove_existing=True)
    epochs = Epochs(raw, events, None, tmin=-0.2, tmax=0., preload=True)
    for kind in ('shift', 'None'):
        epochs_2 = epochs.copy()
        # This should be fine
        compute_covariance([epochs, epochs_2])
        if kind == 'shift':
            epochs_2.info['dev_head_t']['trans'][:3, 3] += 0.001
        else:  # None
            epochs_2.info['dev_head_t'] = None
        pytest.raises(ValueError, compute_covariance, [epochs, epochs_2])
        compute_covariance([epochs, epochs_2], on_mismatch='ignore')
        # BUGFIX: RuntimeWarning is a warning, not an exception, so it must
        # be asserted with pytest.warns (pytest.raises would never catch it).
        with pytest.warns(RuntimeWarning, match='transform mismatch'):
            compute_covariance([epochs, epochs_2], on_mismatch='warn')
        with pytest.raises(ValueError, match='Invalid value'):
            compute_covariance(epochs, on_mismatch='x')
    # This should work
    epochs.info['dev_head_t'] = None
    epochs_2.info['dev_head_t'] = None
    compute_covariance([epochs, epochs_2], method=None)
def test_cov_order():
    """Test covariance ordering.

    Operations on a covariance must be invariant to a random permutation of
    its channels: regularization, preparation, whitening, and evoked
    whitening are each checked against a shuffled copy.
    """
    raw = read_raw_fif(raw_fname)
    raw.set_eeg_reference(projection=True)
    info = raw.info
    # add MEG channel with low enough index number to affect EEG if
    # order is incorrect
    info['bads'] += ['MEG 0113']
    ch_names = [info['ch_names'][pick]
                for pick in pick_types(info, meg=False, eeg=True)]
    cov = read_cov(cov_fname)
    # no avg ref present warning
    prepare_noise_cov(cov, info, ch_names, verbose='error')
    # big reordering
    cov_reorder = cov.copy()
    order = np.random.RandomState(0).permutation(np.arange(len(cov.ch_names)))
    cov_reorder['names'] = [cov['names'][ii] for ii in order]
    cov_reorder['data'] = cov['data'][order][:, order]
    # Make sure we did this properly
    _assert_reorder(cov_reorder, cov, order)
    # Now check some functions that should get the same result for both
    # regularize
    with pytest.raises(ValueError, match='rank, if str'):
        regularize(cov, info, rank='foo')
    with pytest.raises(TypeError, match='rank must be'):
        regularize(cov, info, rank=False)
    with pytest.raises(TypeError, match='rank must be'):
        regularize(cov, info, rank=1.)
    cov_reg = regularize(cov, info, rank='full')
    cov_reg_reorder = regularize(cov_reorder, info, rank='full')
    _assert_reorder(cov_reg_reorder, cov_reg, order)
    # prepare_noise_cov
    cov_prep = prepare_noise_cov(cov, info, ch_names)
    # BUGFIX: the reordered covariance must be prepared here; the original
    # passed `cov` twice, which made the reordering comparison vacuous.
    cov_prep_reorder = prepare_noise_cov(cov_reorder, info, ch_names)
    _assert_reorder(cov_prep, cov_prep_reorder,
                    order=np.arange(len(cov_prep['names'])))
    # compute_whitener
    whitener, w_ch_names, n_nzero = compute_whitener(
        cov, info, return_rank=True)
    assert whitener.shape[0] == whitener.shape[1]
    whitener_2, w_ch_names_2, n_nzero_2 = compute_whitener(
        cov_reorder, info, return_rank=True)
    assert_array_equal(w_ch_names_2, w_ch_names)
    assert_allclose(whitener_2, whitener, rtol=1e-6)
    assert n_nzero == n_nzero_2
    # with pca
    assert n_nzero < whitener.shape[0]
    whitener_pca, w_ch_names_pca, n_nzero_pca = compute_whitener(
        cov, info, pca=True, return_rank=True)
    assert_array_equal(w_ch_names_pca, w_ch_names)
    assert n_nzero_pca == n_nzero
    assert whitener_pca.shape == (n_nzero_pca, len(w_ch_names))
    # whiten_evoked
    evoked = read_evokeds(ave_fname)[0]
    evoked_white = whiten_evoked(evoked, cov)
    evoked_white_2 = whiten_evoked(evoked, cov_reorder)
    assert_allclose(evoked_white_2.data, evoked_white.data, atol=1e-7)
def _assert_reorder(cov_new, cov_orig, order):
"""Check that we get the same result under reordering."""
inv_order = np.argsort(order)
assert_array_equal([cov_new['names'][ii] for ii in inv_order],
cov_orig['names'])
assert_allclose(cov_new['data'][inv_order][:, inv_order],
cov_orig['data'], atol=1e-20)
def test_ad_hoc_cov(tmpdir):
    """Test ad hoc cov creation and I/O."""
    out_fname = tmpdir.join('test-cov.fif')
    evoked = read_evokeds(ave_fname)[0]
    # Default per-channel-type standard deviations
    cov = make_ad_hoc_cov(evoked.info)
    cov.save(out_fname)
    assert 'Covariance' in repr(cov)
    cov2 = read_cov(out_fname)
    assert_array_almost_equal(cov['data'], cov2['data'])
    # Custom per-channel-type standard deviations must round-trip too
    std = dict(grad=2e-13, mag=10e-15, eeg=0.1e-6)
    cov = make_ad_hoc_cov(evoked.info, std)
    cov.save(out_fname)
    assert 'Covariance' in repr(cov)
    cov2 = read_cov(out_fname)
    assert_array_almost_equal(cov['data'], cov2['data'])
    # Making the data 1-D while the stored flags still describe the other
    # layout must be detected as inconsistent by _get_square()
    cov['data'] = np.diag(cov['data'])
    with pytest.raises(RuntimeError, match='attributes inconsistent'):
        cov._get_square()
    cov['diag'] = False
    cov._get_square()
    cov['data'] = np.diag(cov['data'])
    with pytest.raises(RuntimeError, match='attributes inconsistent'):
        cov._get_square()
def test_io_cov(tmpdir):
    """Test IO for noise covariance matrices."""
    cov = read_cov(cov_fname)
    cov['method'] = 'empirical'
    cov['loglik'] = -np.inf
    # plain FIF round trip preserves data and estimator metadata
    cov.save(tmpdir.join('test-cov.fif'))
    cov2 = read_cov(tmpdir.join('test-cov.fif'))
    assert_array_almost_equal(cov.data, cov2.data)
    assert_equal(cov['method'], cov2['method'])
    assert_equal(cov['loglik'], cov2['loglik'])
    assert 'Covariance' in repr(cov)
    # gzipped FIF round trip
    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2.save(tmpdir.join('test-cov.fif.gz'))
    cov2 = read_cov(tmpdir.join('test-cov.fif.gz'))
    assert_array_almost_equal(cov.data, cov2.data)
    # channel subselection via bads
    cov['bads'] = ['EEG 039']
    cov_sel = pick_channels_cov(cov, exclude=cov['bads'])
    assert cov_sel['dim'] == (len(cov['data']) - len(cov['bads']))
    assert cov_sel['data'].shape == (cov_sel['dim'], cov_sel['dim'])
    cov_sel.save(tmpdir.join('test-cov.fif'))
    # NOTE(review): this stanza re-reads cov_gz_fname instead of the
    # cov_sel file saved just above, duplicating the gz check earlier --
    # confirm whether a cov_sel round-trip was intended here.
    cov2 = read_cov(cov_gz_fname)
    assert_array_almost_equal(cov.data, cov2.data)
    cov2.save(tmpdir.join('test-cov.fif.gz'))
    cov2 = read_cov(tmpdir.join('test-cov.fif.gz'))
    assert_array_almost_equal(cov.data, cov2.data)
    # test warnings on bad filenames
    cov_badname = tmpdir.join('test-bad-name.fif.gz')
    with pytest.warns(RuntimeWarning, match='-cov.fif'):
        write_cov(cov_badname, cov)
    with pytest.warns(RuntimeWarning, match='-cov.fif'):
        read_cov(cov_badname)
@pytest.mark.parametrize('method', (None, 'empirical', 'shrunk'))
def test_cov_estimation_on_raw(method, tmpdir):
    """Test estimation from raw (typically empty room).

    Compares compute_raw_covariance output against a stored reference
    covariance and against a plain np.cov computation, then checks FIF
    round-trip, channel subsets, and rejection behavior.
    """
    if method == 'shrunk':
        try:
            import sklearn  # noqa: F401
        except Exception as exp:
            pytest.skip('sklearn is required, got %s' % (exp,))
    raw = read_raw_fif(raw_fname, preload=True)
    cov_mne = read_cov(erm_cov_fname)
    method_params = dict(shrunk=dict(shrinkage=[0]))
    # The pure-string uses the more efficient numpy-based method, the
    # list gets triaged to compute_covariance (should be equivalent
    # but use more memory)
    with pytest.warns(None):  # can warn about EEG ref
        cov = compute_raw_covariance(
            raw, tstep=None, method=method, rank='full',
            method_params=method_params)
    assert_equal(cov.ch_names, cov_mne.ch_names)
    assert_equal(cov.nfree, cov_mne.nfree)
    assert_snr(cov.data, cov_mne.data, 1e6)
    # test equivalence with np.cov
    cov_np = np.cov(raw.copy().pick_channels(cov['names']).get_data(), ddof=1)
    if method != 'shrunk':  # can check all
        off_diag = np.triu_indices(cov_np.shape[0])
    else:
        # We explicitly zero out off-diag entries between channel types,
        # so let's just check MEG off-diag entries
        off_diag = np.triu_indices(len(pick_types(raw.info, meg=True,
                                                  exclude=())))
    for other in (cov_mne, cov):
        assert_allclose(np.diag(cov_np), np.diag(other.data), rtol=5e-6)
        assert_allclose(cov_np[off_diag], other.data[off_diag], rtol=4e-3)
        assert_snr(cov.data, other.data, 1e6)
    # tstep=0.2 (default)
    with pytest.warns(None):  # can warn about EEG ref
        cov = compute_raw_covariance(raw, method=method, rank='full',
                                     method_params=method_params)
    assert_equal(cov.nfree, cov_mne.nfree - 120)  # cutoff some samples
    assert_snr(cov.data, cov_mne.data, 170)
    # test IO when computation done in Python
    cov.save(tmpdir.join('test-cov.fif'))  # test saving
    cov_read = read_cov(tmpdir.join('test-cov.fif'))
    assert cov_read.ch_names == cov.ch_names
    assert cov_read.nfree == cov.nfree
    assert_array_almost_equal(cov.data, cov_read.data)
    # test with a subset of channels
    raw_pick = raw.copy().pick_channels(raw.ch_names[:5])
    raw_pick.info.normalize_proj()
    cov = compute_raw_covariance(raw_pick, tstep=None, method=method,
                                 rank='full', method_params=method_params)
    assert cov_mne.ch_names[:5] == cov.ch_names
    assert_snr(cov.data, cov_mne.data[:5, :5], 5e6)
    cov = compute_raw_covariance(raw_pick, method=method, rank='full',
                                 method_params=method_params)
    assert_snr(cov.data, cov_mne.data[:5, :5], 90)  # cutoff samps
    # make sure we get a warning with too short a segment
    raw_2 = read_raw_fif(raw_fname).crop(0, 1)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_raw_covariance(raw_2, method=method,
                                     method_params=method_params)
    # no epochs found due to rejection
    pytest.raises(ValueError, compute_raw_covariance, raw, tstep=None,
                  method='empirical', reject=dict(eog=200e-6))
    # but this should work
    with pytest.warns(None):  # sklearn
        cov = compute_raw_covariance(
            raw.copy().crop(0, 10.), tstep=None, method=method,
            reject=dict(eog=1000e-6), method_params=method_params,
            verbose='error')
@pytest.mark.slowtest
@requires_sklearn
def test_cov_estimation_on_raw_reg():
    """Test estimation from raw with regularization."""
    raw = read_raw_fif(raw_fname, preload=True)
    # Decimate by 10 (and adjust sfreq to match) purely for test speed
    raw.info['sfreq'] /= 10.
    raw = RawArray(raw._data[:, ::10].copy(), raw.info)  # decimate for speed
    cov_mne = read_cov(erm_cov_fname)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        # XXX don't use "shrunk" here, for some reason it makes Travis 2.7
        # hang... "diagonal_fixed" is much faster. Use long epochs for speed.
        cov = compute_raw_covariance(raw, tstep=5., method='diagonal_fixed')
    assert_snr(cov.data, cov_mne.data, 5)
def _assert_cov(cov, cov_desired, tol=0.005, nfree=True):
assert_equal(cov.ch_names, cov_desired.ch_names)
err = (linalg.norm(cov.data - cov_desired.data, ord='fro') /
linalg.norm(cov.data, ord='fro'))
assert err < tol, '%s >= %s' % (err, tol)
if nfree:
assert_equal(cov.nfree, cov_desired.nfree)
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None))
def test_cov_estimation_with_triggers(rank, tmpdir):
    """Test estimation from raw with triggers.

    Builds epochs from trigger events and checks that compute_covariance
    matches stored reference covariances, handles lists of Epochs,
    keep_sample_mean, IO round-trip, projector mismatches, and dict
    event_id support.
    """
    raw = read_raw_fif(raw_fname)
    raw.set_eeg_reference(projection=True).load_data()
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(grad=10000e-13, mag=4e-12, eeg=80e-6, eog=150e-6)
    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    epochs = Epochs(raw, events_merged, 1234, tmin=-0.2, tmax=0,
                    baseline=(-0.2, -0.1), proj=True,
                    reject=reject, preload=True)
    cov = compute_covariance(epochs, keep_sample_mean=True)
    cov_km = read_cov(cov_km_fname)
    # adjust for nfree bug
    cov_km['nfree'] -= 1
    _assert_cov(cov, cov_km)
    # Test with tmin and tmax (different but not too much)
    cov_tmin_tmax = compute_covariance(epochs, tmin=-0.19, tmax=-0.01)
    assert np.all(cov.data != cov_tmin_tmax.data)
    err = (linalg.norm(cov.data - cov_tmin_tmax.data, ord='fro') /
           linalg.norm(cov_tmin_tmax.data, ord='fro'))
    assert err < 0.05
    # cov using a list of epochs and keep_sample_mean=True
    epochs = [Epochs(raw, events, ev_id, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True, reject=reject)
              for ev_id in event_ids]
    cov2 = compute_covariance(epochs, keep_sample_mean=True)
    assert_array_almost_equal(cov.data, cov2.data)
    assert cov.ch_names == cov2.ch_names
    # cov with keep_sample_mean=False using a list of epochs
    cov = compute_covariance(epochs, keep_sample_mean=False)
    assert cov_km.nfree == cov.nfree
    _assert_cov(cov, read_cov(cov_fname), nfree=False)
    # keep_sample_mean=False is incompatible with these options
    method_params = {'empirical': {'assume_centered': False}}
    pytest.raises(ValueError, compute_covariance, epochs,
                  keep_sample_mean=False, method_params=method_params)
    pytest.raises(ValueError, compute_covariance, epochs,
                  keep_sample_mean=False, method='shrunk', rank=rank)
    # test IO when computation done in Python
    cov.save(tmpdir.join('test-cov.fif'))  # test saving
    cov_read = read_cov(tmpdir.join('test-cov.fif'))
    _assert_cov(cov, cov_read, 1e-5)
    # cov with list of epochs with different projectors
    epochs = [Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=True),
              Epochs(raw, events[:1], None, tmin=-0.2, tmax=0,
                     baseline=(-0.2, -0.1), proj=False)]
    # these should fail
    pytest.raises(ValueError, compute_covariance, epochs)
    pytest.raises(ValueError, compute_covariance, epochs, projs=None)
    # these should work, but won't be equal to above
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_covariance(epochs, projs=epochs[0].info['projs'])
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        cov = compute_covariance(epochs, projs=[])
    # test new dict support
    epochs = Epochs(raw, events, dict(a=1, b=2, c=3, d=4), tmin=-0.01, tmax=0,
                    proj=True, reject=reject, preload=True)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        compute_covariance(epochs)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        compute_covariance(epochs, projs=[])
    pytest.raises(TypeError, compute_covariance, epochs, projs='foo')
    pytest.raises(TypeError, compute_covariance, epochs, projs=['foo'])
def test_arithmetic_cov():
    """Test that + and += on Covariance sum data and degrees of freedom."""
    base = read_cov(cov_fname)
    # Binary addition should double both nfree and the data.
    summed = base + base
    assert base.ch_names == summed.ch_names
    assert_array_almost_equal(summed.nfree, 2 * base.nfree)
    assert_array_almost_equal(summed.data, 2 * base.data)
    # In-place addition must agree with the binary result.
    base += base
    assert summed.ch_names == base.ch_names
    assert_array_almost_equal(base.nfree, summed.nfree)
    assert_array_almost_equal(base.data, summed.data)
def test_regularize_cov():
    """Check covariance regularization with bad channels present."""
    raw = read_raw_fif(raw_fname)
    # Mark one channel bad so the exclude='bads' path is exercised.
    raw.info['bads'].append(raw.ch_names[0])
    cov = read_cov(cov_fname)
    cov_reg = regularize(cov, raw.info,
                         mag=0.1, grad=0.1, eeg=0.1, proj=True,
                         exclude='bads', rank='full')
    # Regularization must not change dimensions, and should only rarely
    # increase entries relative to the input.
    assert cov['dim'] == cov_reg['dim']
    assert cov['data'].shape == cov_reg['data'].shape
    assert np.mean(cov['data'] < cov_reg['data']) < 0.08
    # Every data channel type must be a keyword argument of regularize.
    assert set(_DATA_CH_TYPES_SPLIT).issubset(_get_args(regularize))
def test_whiten_evoked():
    """Whitened evoked baseline data should have roughly unit scale."""
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=True)
    raw_cov = read_cov(cov_fname)
    data_picks = pick_types(evoked.info, meg=True, eeg=True, ref_meg=False,
                            exclude='bads')
    reg_cov = regularize(raw_cov, evoked.info, grad=0.1, mag=0.1, eeg=0.1,
                         exclude='bads', rank='full')
    white = whiten_evoked(evoked, reg_cov, data_picks, diag=True)
    # The per-channel mean absolute baseline amplitude should be O(1).
    baseline = white.data[data_picks][:, evoked.times < 0]
    scale = np.abs(baseline).mean(axis=1)
    assert np.all(scale < 1.)
    assert np.all(scale > 0.2)
    # Degenerate case: a covariance missing channels must raise.
    cov_bad = pick_channels_cov(raw_cov, include=evoked.ch_names[:10])
    pytest.raises(RuntimeError, whiten_evoked, evoked, cov_bad, data_picks)
def test_regularized_covariance():
    """Check that _regularized_covariance leaves its input untouched."""
    evoked = read_evokeds(ave_fname, condition=0, baseline=(None, 0),
                          proj=True)
    # Pass a copy and verify it still matches the original afterwards
    # (regression test for gh-5698).
    buf = evoked.data.copy()
    _regularized_covariance(buf)
    assert_allclose(buf, evoked.data, atol=1e-20)
@requires_sklearn
def test_auto_low_rank():
    """Test probabilistic low rank estimators."""
    n_samples, n_features, rank = 400, 10, 5
    sigma = 0.1
    def get_data(n_samples, n_features, rank, sigma):
        # Build a matrix of the requested rank plus heteroscedastic noise:
        # latent factors projected onto `rank` orthonormal directions.
        rng = np.random.RandomState(42)
        W = rng.randn(n_features, n_features)
        X = rng.randn(n_samples, rank)
        U, _, _ = linalg.svd(W.copy())
        X = np.dot(X, U[:, :rank].T)  # embed into the feature space
        sigmas = sigma * rng.rand(n_features) + sigma / 2.
        X += rng.randn(n_samples, n_features) * sigmas
        return X
    X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
                 sigma=sigma)
    method_params = {'iter_n_components': [4, 5, 6]}
    cv = 3
    n_jobs = 1
    mode = 'factor_analysis'
    rescale = 1e8
    # Rescaling should not affect which number of components is selected.
    X *= rescale
    est, info = _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
                                     method_params=method_params,
                                     cv=cv)
    assert_equal(info['best'], rank)  # CV must recover the true rank
    X = get_data(n_samples=n_samples, n_features=n_features, rank=rank,
                 sigma=sigma)
    # Requesting more components than available features must warn.
    method_params = {'iter_n_components': [n_features + 5]}
    msg = ('You are trying to estimate %i components on matrix '
           'with %i features.') % (n_features + 5, n_features)
    with pytest.warns(RuntimeWarning, match=msg):
        _auto_low_rank_model(X, mode=mode, n_jobs=n_jobs,
                             method_params=method_params, cv=cv)
@pytest.mark.slowtest
@pytest.mark.parametrize('rank', ('full', None, 'info'))
@requires_sklearn
def test_compute_covariance_auto_reg(rank):
    """Test automated regularization."""
    raw = read_raw_fif(raw_fname, preload=True)
    raw.resample(100, npad='auto')  # much faster estimation
    events = find_events(raw, stim_channel='STI 014')
    event_ids = [1, 2, 3, 4]
    reject = dict(mag=4e-12)
    # cov with merged events and keep_sample_mean=True
    events_merged = merge_events(events, event_ids, 1234)
    # we need a few channels for numerical reasons in PCA/FA
    picks = pick_types(raw.info, meg='mag', eeg=False)[:10]
    raw.pick_channels([raw.ch_names[pick] for pick in picks])
    raw.info.normalize_proj()
    epochs = Epochs(
        raw, events_merged, 1234, tmin=-0.2, tmax=0,
        baseline=(-0.2, -0.1), proj=True, reject=reject, preload=True)
    epochs = epochs.crop(None, 0)[:5]
    method_params = dict(factor_analysis=dict(iter_n_components=[3]),
                         pca=dict(iter_n_components=[3]))
    covs = compute_covariance(epochs, method='auto',
                              method_params=method_params,
                              return_estimators=True, rank=rank)
    # make sure regularization produces structured differences
    diag_mask = np.eye(len(epochs.ch_names)).astype(bool)
    off_diag_mask = np.invert(diag_mask)
    for cov_a, cov_b in itt.combinations(covs, 2):
        if (cov_a['method'] == 'diagonal_fixed' and
                # here we have diagonal or no regularization.
                cov_b['method'] == 'empirical' and rank == 'full'):
            assert not np.any(cov_a['data'][diag_mask] ==
                              cov_b['data'][diag_mask])
            # but the rest is the same
            assert_allclose(cov_a['data'][off_diag_mask],
                            cov_b['data'][off_diag_mask], rtol=1e-12)
        else:
            # and here we have shrinkage everywhere.
            assert not np.any(cov_a['data'][diag_mask] ==
                              cov_b['data'][diag_mask])
            # NOTE(review): the assert below duplicates the one above --
            # possibly off_diag_mask was intended; confirm before changing.
            assert not np.any(cov_a['data'][diag_mask] ==
                              cov_b['data'][diag_mask])
    logliks = [c['loglik'] for c in covs]
    assert np.diff(logliks).max() <= 0  # descending order
    methods = ['empirical', 'ledoit_wolf', 'oas', 'shrunk', 'shrinkage']
    if rank == 'full':
        # low-rank-incompatible estimators only run with rank='full'
        methods.extend(['factor_analysis', 'pca'])
    with catch_logging() as log:
        cov3 = compute_covariance(epochs, method=methods,
                                  method_params=method_params, projs=None,
                                  return_estimators=True, rank=rank,
                                  verbose=True)
    log = log.getvalue().split('\n')
    if rank is None:
        assert '    Setting small MAG eigenvalues to zero (without PCA)' in log
        assert 'Reducing data rank from 10 -> 7' in log
    else:
        assert 'Reducing' not in log
    method_names = [cov['method'] for cov in cov3]
    # expected log-likelihood windows for the various estimators
    best_bounds = [-45, -35]
    bounds = [-55, -45] if rank == 'full' else best_bounds
    for method in set(methods) - {'empirical', 'shrunk'}:
        this_lik = cov3[method_names.index(method)]['loglik']
        assert bounds[0] < this_lik < bounds[1]
    this_lik = cov3[method_names.index('shrunk')]['loglik']
    assert best_bounds[0] < this_lik < best_bounds[1]
    this_lik = cov3[method_names.index('empirical')]['loglik']
    bounds = [-110, -100] if rank == 'full' else best_bounds
    assert bounds[0] < this_lik < bounds[1]
    assert_equal({c['method'] for c in cov3}, set(methods))
    cov4 = compute_covariance(epochs, method=methods,
                              method_params=method_params, projs=None,
                              return_estimators=False, rank=rank)
    assert cov3[0]['method'] == cov4['method']  # ordering
    # invalid prespecified method
    pytest.raises(ValueError, compute_covariance, epochs, method='pizza')
    # invalid scalings
    pytest.raises(ValueError, compute_covariance, epochs, method='shrunk',
                  scalings=dict(misc=123))
def _cov_rank(cov, info, proj=True):
    """Compute the integer rank of ``cov``, swallowing rank warnings.

    Some tests deliberately violate the computed/info rank assumption
    (e.g. SSS data with ``rank='full'``), which triggers rank-mismatch
    warnings we do not care about here.
    """
    with pytest.warns(None):
        rank = _compute_rank_int(cov, info=info, proj=proj)
    return rank
@pytest.fixture(scope='module')
def raw_epochs_events():
    """Provide (raw, epochs, events) shared by the low-rank tests."""
    raw = read_raw_fif(raw_fname)
    raw = raw.set_eeg_reference(projection=True).crop(0, 3)
    # Maxwell filtering without regularization heavily reduces the rank.
    raw = maxwell_filter(raw, regularize=None)
    assert raw.info['bads'] == []  # sanity: no bad channels expected
    events = make_fixed_length_events(raw)
    epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
    return raw, epochs, events
@requires_sklearn
@pytest.mark.parametrize('rank', (None, 'full', 'info'))
def test_low_rank_methods(rank, raw_epochs_events):
    """Test low-rank covariance matrix estimation."""
    epochs = raw_epochs_events[1]
    sss_proj_rank = 139  # 80 MEG + 60 EEG - 1 proj
    n_ch = 366
    methods = ('empirical', 'diagonal_fixed', 'oas')
    # Expected log-likelihood windows per rank option and method.
    bounds = {
        'None': dict(empirical=(-15000, -5000),
                     diagonal_fixed=(-1500, -500),
                     oas=(-700, -600)),
        'full': dict(empirical=(-18000, -8000),
                     diagonal_fixed=(-2000, -1600),
                     oas=(-1600, -1000)),
        'info': dict(empirical=(-15000, -5000),
                     diagonal_fixed=(-700, -600),
                     oas=(-700, -600)),
    }
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        covs = compute_covariance(
            epochs, method=methods, return_estimators=True, rank=rank,
            verbose=True)
    for cov in covs:
        method = cov['method']
        these_bounds = bounds[str(rank)][method]
        this_rank = _cov_rank(cov, epochs.info, proj=(rank != 'full'))
        if rank == 'full' and method != 'empirical':
            # regularized estimators restore full rank
            assert this_rank == n_ch
        else:
            assert this_rank == sss_proj_rank
        assert these_bounds[0] < cov['loglik'] < these_bounds[1], \
            (rank, method)
@requires_sklearn
def test_low_rank_cov(raw_epochs_events):
    """Test additional properties of low rank computations."""
    raw, epochs, events = raw_epochs_events
    sss_proj_rank = 139  # 80 MEG + 60 EEG - 1 proj
    n_ch = 366
    proj_rank = 365  # one EEG proj
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        emp_cov = compute_covariance(epochs)
    # Test equivalence with mne.cov.regularize subspace
    with pytest.raises(ValueError, match='are dependent.*must equal'):
        regularize(emp_cov, epochs.info, rank=None, mag=0.1, grad=0.2)
    assert _cov_rank(emp_cov, epochs.info) == sss_proj_rank
    reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
    assert _cov_rank(reg_cov, epochs.info) == proj_rank
    with pytest.warns(RuntimeWarning, match='exceeds the theoretical'):
        _compute_rank_int(reg_cov, info=epochs.info)
    del reg_cov
    with catch_logging() as log:
        reg_r_cov = regularize(emp_cov, epochs.info, proj=True, rank=None,
                               verbose=True)
    log = log.getvalue()
    assert 'jointly' in log
    assert _cov_rank(reg_r_cov, epochs.info) == sss_proj_rank
    # With rank=None, proj=False must give the same data as proj=True.
    reg_r_only_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
    assert _cov_rank(reg_r_only_cov, epochs.info) == sss_proj_rank
    assert_allclose(reg_r_only_cov['data'], reg_r_cov['data'])
    del reg_r_only_cov, reg_r_cov
    # test that rank=306 is same as rank='full'
    epochs_meg = epochs.copy().pick_types(meg=True)
    assert len(epochs_meg.ch_names) == 306
    epochs_meg.info.update(bads=[], projs=[])
    cov_full = compute_covariance(epochs_meg, method='oas',
                                  rank='full', verbose='error')
    assert _cov_rank(cov_full, epochs_meg.info) == 306
    with pytest.warns(RuntimeWarning, match='few samples'):
        cov_dict = compute_covariance(epochs_meg, method='oas',
                                      rank=dict(meg=306))
    assert _cov_rank(cov_dict, epochs_meg.info) == 306
    assert_allclose(cov_full['data'], cov_dict['data'])
    cov_dict = compute_covariance(epochs_meg, method='oas',
                                  rank=dict(meg=306), verbose='error')
    assert _cov_rank(cov_dict, epochs_meg.info) == 306
    assert_allclose(cov_full['data'], cov_dict['data'])
    # Work with just EEG data to simplify projection / rank reduction
    raw = raw.copy().pick_types(meg=False, eeg=True)
    n_proj = 2
    raw.add_proj(compute_proj_raw(raw, n_eeg=n_proj))
    n_ch = len(raw.ch_names)
    rank = n_ch - n_proj - 1  # plus avg proj
    assert len(raw.info['projs']) == 3
    epochs = Epochs(raw, events, tmin=-0.2, tmax=0, preload=True)
    assert len(raw.ch_names) == n_ch
    emp_cov = compute_covariance(epochs, rank='full', verbose='error')
    assert _cov_rank(emp_cov, epochs.info) == rank
    reg_cov = regularize(emp_cov, epochs.info, proj=True, rank='full')
    assert _cov_rank(reg_cov, epochs.info) == rank
    reg_r_cov = regularize(emp_cov, epochs.info, proj=False, rank=None)
    assert _cov_rank(reg_r_cov, epochs.info) == rank
    # diagonal_fixed with rank=None should match the regularized result
    dia_cov = compute_covariance(epochs, rank=None, method='diagonal_fixed',
                                 verbose='error')
    assert _cov_rank(dia_cov, epochs.info) == rank
    assert_allclose(dia_cov['data'], reg_cov['data'])
    epochs.pick_channels(epochs.ch_names[:103])
    # degenerate
    with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
        compute_covariance(epochs, rank=None, method='pca')
    with pytest.raises(ValueError, match='can.*only be used with rank="full"'):
        compute_covariance(epochs, rank=None, method='factor_analysis')
@testing.requires_testing_data
@requires_sklearn
def test_cov_ctf():
    """Test basic cov computation on ctf data with/without compensation."""
    raw = read_raw_ctf(ctf_fname).crop(0., 2.).load_data()
    events = make_fixed_length_events(raw, 99999)
    assert len(events) == 2
    ch_names = [raw.info['ch_names'][pick]
                for pick in pick_types(raw.info, meg=True, eeg=False,
                                       ref_meg=False)]
    # Covariance must be computable under either compensation grade.
    for comp in [0, 1]:
        raw.apply_gradient_compensation(comp)
        epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
        with pytest.warns(RuntimeWarning, match='Too few samples'):
            noise_cov = compute_covariance(epochs, tmax=0.,
                                           method=['empirical'])
        prepare_noise_cov(noise_cov, raw.info, ch_names)
    # Compute at grade 0, then switch raw to grade 1 before preparing.
    raw.apply_gradient_compensation(0)
    epochs = Epochs(raw, events, None, -0.2, 0.2, preload=True)
    with pytest.warns(RuntimeWarning, match='Too few samples'):
        noise_cov = compute_covariance(epochs, tmax=0., method=['empirical'])
    raw.apply_gradient_compensation(1)
    # TODO This next call in principle should fail.
    prepare_noise_cov(noise_cov, raw.info, ch_names)
    # make sure comps matrices were not removed from raw
    assert raw.info['comps'], 'Comps matrices removed'
def test_equalize_channels():
    """Check channel equalization across Covariance instances."""
    info_a = create_info(['CH1', 'CH2', 'CH3', 'CH4'], sfreq=1.0,
                         ch_types='eeg')
    info_b = create_info(['CH5', 'CH1', 'CH2'], sfreq=1.0,
                         ch_types='eeg')
    cov_a, cov_b = equalize_channels([make_ad_hoc_cov(info_a),
                                      make_ad_hoc_cov(info_b)])
    # Only the channels common to both instances should remain.
    for cov in (cov_a, cov_b):
        assert cov.ch_names == ['CH1', 'CH2']
def test_compute_whitener_rank():
    """Test risky rank options."""
    info = read_info(ave_fname)
    info = pick_info(info, pick_types(info, meg=True))
    info['projs'] = []
    # need a square version because the diag one takes shortcuts in
    # compute_whitener (users shouldn't even need this function so it's
    # private)
    cov = make_ad_hoc_cov(info)._as_square()
    assert len(cov['names']) == 306
    _, _, rank = compute_whitener(cov, info, rank=None, return_rank=True)
    assert rank == 306
    assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank)
    # Deficient matrix: estimated rank should drop by one.
    cov['data'][-1] *= 1e-14  # trivially rank-deficient
    _, _, rank = compute_whitener(cov, info, rank=None, return_rank=True)
    assert rank == 305
    assert compute_rank(cov, info=info, verbose=True) == dict(meg=rank)
    # this should emit a warning (requested rank exceeds the estimate)
    with pytest.warns(RuntimeWarning, match='exceeds the estimated'):
        _, _, rank = compute_whitener(cov, info, rank=dict(meg=306),
                                      return_rank=True)
    assert rank == 306
|
bsd-3-clause
|
depet/scikit-learn
|
sklearn/gpml/gp.py
|
1
|
15571
|
import numpy
import scipy.optimize
from ..base import BaseEstimator
from . import cov
from . import inf
from . import lik
from . import mean
from . import util
class GP(BaseEstimator):
    """
    The GP model class.

    Parameters
    ----------
    x : double array_like
        An array with shape (n_samples, n_features) with the input at which
        observations were made.
    y : double array_like
        An array with shape (n_samples, ) with the observations of the
        scalar output to be predicted.
    hyp : double array like or dictionary, optional
        An array with shape (n_hyp, ) or dictionary containing keys 'mean',
        'cov' and 'lik' and arrays with shape (n_hyp_KEY,) as
        hyperparameter values.
    inffunc : string or callable, optional
        An inference function computing the (approximate) posterior for a
        Gaussian process.  Available built-in inference functions are::

            'exact', 'laplace', 'ep', 'vb', 'fitc', 'fitc_laplace', 'fitc_ep'

    meanfunc : string or callable, optional
        A mean function to be used by Gaussian process functions.
        Available built-in simple mean functions are::

            'zero', 'one', 'const', 'linear'

        and composite mean functions are::

            'mask', 'pow', 'prod', 'scale', 'sum'

    covfunc : string or callable, optional
        A covariance function to be used by Gaussian process functions.
        Available built-in simple covariance functions are::

            'const', 'lin', 'linard', 'linone', 'materniso', 'nnone',
            'noise', 'periodic', 'poly', 'ppiso', 'rqard', 'rqiso',
            'seard', 'seiso', 'seisou'

        and composite covariance functions are::

            'add', 'mask', 'prod', 'scale', 'sum'

        and special purpose (wrapper) covariance functions::

            'fitc'

    likfunc : string or callable, optional
        A likelihood function to be used by Gaussian process functions.
        Available built-in likelihood functions are::

            'erf', 'logistic', 'uni', 'gauss', 'laplace', 'sech2', 't',
            'poisson'

        and composite likelihood functions are::

            'mix'

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.gpml import GP
    >>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
    >>> y = (X * np.sin(X)).ravel()
    >>> gp = GP(X, y)
    >>> gp.fit(X, y)  # doctest: +ELLIPSIS
    GP(beta0=None...
    ...
    >>> Xs = numpy.array([numpy.linspace(-1,10,101)]).T
    >>> mu, s2 = gp.predict(Xs)

    Notes
    -----
    The implementation is based on a translation of the GPML
    Matlab toolbox version 3.2, see reference [GPML32]_.

    References
    ----------
    .. [RN2013] `C.E. Rasmussen and H. Nickisch. The GPML Toolbox version 3.2. (2013)`
            http://www.gaussianprocess.org/gpml/code/matlab/doc/manual.pdf

    .. [RW2006] `C.E. Rasmussen and C.K.I. Williams (2006). Gaussian Processes for
            Machine Learning. The MIT Press.`
            http://www.gaussianprocess.org/gpml/chapters/RW.pdf
    """

    # Registries mapping user-facing names to the implementing callables.
    _inference_functions = {
        'exact': inf.exact}
    _mean_functions = {
        'zero': mean.zero}
    _covariance_functions = {
        'seard': cov.seArd}
    _likelihood_functions = {
        'gauss': lik.gauss}

    # Iteration counter used by the ``fit`` progress callback.
    __prnt_counter = 0

    def __init__(self, x, y, hyp=None, inffunc=inf.exact, meanfunc=mean.zero,
                 covfunc=cov.seArd, likfunc=lik.gauss):
        self.x, self.y, self.N, self.D = self.__setData(x, y)
        self.inff = self.__setFunc(inffunc, 'inference')
        self.meanf = self.__setFunc(meanfunc, 'mean')
        self.covf = self.__setFunc(covfunc, 'covariance')
        self.likf = self.__setFunc(likfunc, 'likelihood')
        self.hyp = self.__setHyp(hyp)

    def fit(self, x, y, hyp0=None, maxiter=200):
        """
        The GP model fitting method.

        Parameters
        ----------
        x : double array_like
            An array with shape (n_samples, n_features) with the input at
            which observations were made.
        y : double array_like
            An array with shape (n_samples, ) with the observations of the
            scalar output to be predicted.
        hyp0 : double array like or dictionary, optional
            An array with shape (n_hyp, ) or dictionary containing keys
            'mean', 'cov' and 'lik' and arrays with shape (n_hyp_KEY,) as
            initial hyperparameter values.
        maxiter : int, optional
            Maximum number of optimizer iterations.

        Returns
        -------
        gp : self
            A fitted GP model object awaiting data to perform predictions.
        """
        self.x, self.y, self.N, self.D = self.__setData(x, y, False)
        if hyp0 is None:
            hyp0 = self.hyp
        else:
            hyp0 = self.__setHyp(hyp0)
        # Flatten the hyperparameter dict into a single vector, in the
        # canonical [mean, cov, lik] order expected by _gp.
        hyp0 = numpy.concatenate((numpy.reshape(hyp0['mean'], (-1,)),
                                  numpy.reshape(hyp0['cov'], (-1,)),
                                  numpy.reshape(hyp0['lik'], (-1,))))
        self.__prnt_counter = 0
        res = scipy.optimize.minimize(
            self._gp, hyp0,
            (self.inff, self.meanf, self.covf, self.likf, x, y),
            method='Newton-CG', jac=True, callback=self.__prnt,
            options={'maxiter': maxiter})
        self.hyp = self.__setHyp(res.x)
        return self

    def __prnt(self, x):
        # Optimizer progress callback: report the current negative log
        # marginal likelihood.
        self.__prnt_counter += 1
        nlZ, _ = self._gp(x, self.inff, self.meanf, self.covf, self.likf,
                          self.x, self.y)
        print('Iteration %d; Value %f' % (self.__prnt_counter, nlZ))

    def predict(self, xs, ys=None, batch_size=None, nargout=2):
        """
        This function evaluates the GP model at xs.

        Parameters
        ----------
        xs : array_like
            An array with shape (n_eval, n_features) giving the point(s) at
            which the prediction(s) should be made.
        ys : array_like, optional
            An array with shape (n_eval, ) giving the real targets.
            Default assumes ys = None and evaluates only the mean
            prediction.
        batch_size : integer, optional
            An integer giving the maximum number of points that can be
            evaluated simultaneously (depending on the available memory).
            Default is None so that all given points are evaluated at the
            same time.
        nargout : integer, optional
            Number of output quantities to return (MATLAB-style), capped
            at 6: (ymu, ys2, fmu, fs2, lp, post).

        Returns
        -------
        mu : array_like
            An array with shape (n_eval, ) with the mean prediction at xs.
        s2 : array_like
            An array with shape (n_eval, ) with the prediction variance.
        """
        if numpy.size(xs, 1) != self.D:
            if numpy.size(xs, 0) == self.D:
                xs = xs.T  # accept transposed test inputs
            else:
                raise AttributeError('Dimension of the test inputs (xs) disagree with dimension of the train inputs (x).')
        res = self._gp(self.hyp, self.inff, self.meanf, self.covf, self.likf,
                       self.x, self.y, xs, ys, nargout,
                       batch_size=batch_size)
        if nargout <= 1:
            # BUGFIX: _gp returns the bare ymu array when nargout == 1; the
            # original returned res[0], i.e. only the first prediction.
            return res
        elif nargout <= 6:
            return res[:nargout]
        else:
            return res[:6]

    def _gp(self, hyp, inff, meanf, covf, likf, x, y, xs=None, ys=None,
            nargout=None, post=None, hypdict=None, batch_size=None):
        """Core GP routine (translation of GPML's gp.m).

        In training mode (``xs is None``) it returns the negative log
        marginal likelihood and, optionally, its derivatives and the
        posterior.  In prediction mode it returns predictive quantities
        (ymu, ys2, fmu, fs2, lp, post) according to ``nargout``.
        """
        if nargout is None:
            nargout = 2
        # Normalize the function specifications to tuples.
        if not isinstance(meanf, tuple):
            meanf = (meanf,)
        if not isinstance(covf, tuple):
            covf = (covf,)
        if not isinstance(likf, tuple):
            likf = (likf,)
        if isinstance(inff, tuple):
            inff = inff[0]
        # NOTE: D may be referenced by the eval'd hyperparameter-count
        # expressions returned by the feval helpers below.
        D = numpy.size(x, 1)
        if not isinstance(hyp, dict):
            if hypdict is None:
                hypdict = False
            shp = numpy.shape(hyp)
            hyp = numpy.reshape(hyp, (-1, 1))
            ms = eval(mean.feval(meanf))
            cs = eval(cov.feval(covf))
            ls = eval(lik.feval(likf))
            # BUGFIX: slice bounds are cumulative offsets.  The original
            # used hyp[range(ms, cs)] / hyp[range(cs, cs + ls)], which is
            # only correct when the mean function has zero parameters.
            hyp = {'mean': hyp[0:ms],
                   'cov': hyp[ms:ms + cs],
                   'lik': hyp[ms + cs:ms + cs + ls]}
        else:
            if hypdict is None:
                hypdict = True
            if 'mean' not in hyp:
                hyp['mean'] = numpy.array([[]])
            if eval(mean.feval(meanf)) != numpy.size(hyp['mean']):
                raise AttributeError('Number of mean function hyperparameters disagree with mean function')
            if 'cov' not in hyp:
                hyp['cov'] = numpy.array([[]])
            if eval(cov.feval(covf)) != numpy.size(hyp['cov']):
                raise AttributeError('Number of cov function hyperparameters disagree with cov function')
            if 'lik' not in hyp:
                hyp['lik'] = numpy.array([[]])
            if eval(lik.feval(likf)) != numpy.size(hyp['lik']):
                raise AttributeError('Number of lik function hyperparameters disagree with lik function')

        # call the inference method
        try:
            # issue a warning if a classification likelihood is used in
            # conjunction with labels different from +1 and -1.
            # BUGFIX: the original compared the ``lik`` module itself to
            # ``lik.erf``, which can never be true; compare the configured
            # likelihood function instead.
            if likf[0] in (getattr(lik, 'erf', None),
                           getattr(lik, 'logistic', None)):
                uy = numpy.unique(y)
                if numpy.any(~(uy == -1) & ~(uy == 1)):
                    print('You try classification with labels different from {+1,-1}')
            # compute marginal likelihood and its derivatives only if needed
            if xs is not None:
                if post is None:
                    post = inff(hyp, meanf, covf, likf, x, y, nargout=1)
            else:
                if nargout == 1:
                    post, nlZ = inff(hyp, meanf, covf, likf, x, y, nargout=2)
                else:
                    post, nlZ, dnlZ = inff(hyp, meanf, covf, likf, x, y,
                                           nargout=3)
        except Exception as e:
            if xs is not None:
                raise Exception('Inference method failed [%s]' % (e,))
            print('Warning: inference method failed [%s] .. attempting to continue' % (e,))
            # Return NaN with zero gradients so the optimizer can back off.
            if hypdict:
                dnlZ = {'cov': 0 * hyp['cov'], 'mean': 0 * hyp['mean'],
                        'lik': 0 * hyp['lik']}
            else:
                dnlZ = numpy.concatenate(
                    (0 * numpy.reshape(hyp['mean'], (-1, 1)),
                     0 * numpy.reshape(hyp['cov'], (-1, 1)),
                     0 * numpy.reshape(hyp['lik'], (-1, 1))))
                dnlZ = numpy.reshape(dnlZ, shp)
            return (numpy.NaN, dnlZ)

        if xs is None:
            # Training mode: return nlZ (and derivatives / posterior).
            if nargout == 1:
                return nlZ
            if not hypdict:
                # Flatten the derivative dict back to the input vector shape.
                dnlZ = numpy.concatenate(
                    (numpy.reshape(dnlZ['mean'], (-1, 1)),
                     numpy.reshape(dnlZ['cov'], (-1, 1)),
                     numpy.reshape(dnlZ['lik'], (-1, 1)))).T
                dnlZ = numpy.reshape(dnlZ, shp)
            if nargout == 2:
                return (nlZ, dnlZ)
            return (nlZ, dnlZ, post)

        # Prediction mode.
        alpha = post['alpha']
        L = post['L']
        sW = post['sW']
        # Sparse posteriors are not supported (GPML checks issparse(alpha)
        # here); treat every training point as active.
        nz = numpy.tile(numpy.array([[True]]), (numpy.size(alpha, 0), 1))
        if numpy.size(L) == 0:
            # If the representation of the posterior covariance is not
            # provided, compute the Cholesky factor here.
            K = cov.feval(covf, hyp=hyp['cov'], x=x[nz[:, 0], :])
            L = numpy.linalg.cholesky(
                numpy.eye(numpy.sum(nz)) + numpy.dot(sW, sW.T) * K).T
            post['L'] = L  # not in GPML, check if it is really needed
        # Is L an (upper) triangular Cholesky factor, or -inv(K + inv(W))?
        Ltril = numpy.all(numpy.tril(L, -1) == 0)
        ns = numpy.size(xs, 0)
        if batch_size is None:
            batch_size = 1000
        nact = 0
        # allocate memory for the predictive quantities
        ymu = numpy.zeros((ns, 1))
        ys2 = numpy.zeros((ns, 1))
        fmu = numpy.zeros((ns, 1))
        fs2 = numpy.zeros((ns, 1))
        lp = numpy.zeros((ns, 1))
        while nact < ns:
            # process test points in batches to bound memory use
            idx = list(range(nact, min(nact + batch_size, ns)))
            kss = cov.feval(covf, hyp=hyp['cov'], x=xs[idx, :], dg=True)
            Ks = cov.feval(covf, hyp=hyp['cov'], x=x[nz[:, 0], :],
                           z=xs[idx, :])
            ms = mean.feval(meanf, hyp=hyp['mean'], x=xs[idx, :])
            N = numpy.size(alpha, 1)
            # latent predictive means, averaged over posterior samples
            Fmu = numpy.tile(ms, (1, N)) + numpy.dot(Ks.T, alpha[nz[:, 0], :])
            fmu[idx] = numpy.array([numpy.sum(Fmu, 1)]).T / N
            if Ltril:
                # L is a triangular Cholesky factor
                V = numpy.linalg.solve(L.T,
                                       numpy.tile(sW, (1, len(idx))) * Ks)
                fs2[idx] = kss - numpy.array([numpy.sum(V * V, 0)]).T
            else:
                # L is -inv(K + inv(W))
                fs2[idx] = kss + numpy.array(
                    [numpy.sum(Ks * numpy.dot(L, Ks), 0)]).T
            fs2[idx] = numpy.maximum(fs2[idx], 0)  # numerical safety
            Fs2 = numpy.tile(fs2[idx], (1, N))
            if ys is None:
                Lp, Ymu, Ys2 = lik.feval(likf, hyp['lik'],
                                         y=numpy.array([[]]),
                                         mu=numpy.reshape(Fmu, (-1, 1)),
                                         s2=numpy.reshape(Fs2, (-1, 1)))
            else:
                Lp, Ymu, Ys2 = lik.feval(likf, hyp['lik'],
                                         y=numpy.tile(ys[idx], (1, N)),
                                         mu=numpy.reshape(Fmu, (-1, 1)),
                                         s2=numpy.reshape(Fs2, (-1, 1)))
            lp[idx] = numpy.array(
                [numpy.sum(numpy.reshape(Lp, (-1, N)), 1) / N]).T
            ymu[idx] = numpy.array(
                [numpy.sum(numpy.reshape(Ymu, (-1, N)), 1) / N]).T
            ys2[idx] = numpy.array(
                [numpy.sum(numpy.reshape(Ys2, (-1, N)), 1) / N]).T
            nact = idx[-1] + 1
        if ys is None:
            lp = numpy.array([[]])
        if nargout == 1:
            return ymu
        elif nargout == 2:
            return (ymu, ys2)
        elif nargout == 3:
            return (ymu, ys2, fmu)
        elif nargout == 4:
            return (ymu, ys2, fmu, fs2)
        elif nargout == 5:
            return (ymu, ys2, fmu, fs2, lp)
        else:
            return (ymu, ys2, fmu, fs2, lp, post)

    def __setData(self, x, y, init=True):
        # Validate and normalize the training data; returns (x, y, N, D).
        if numpy.size(y, 0) > 1 and numpy.size(y, 1) > 1:
            raise AttributeError('Only one-dimensional targets (y) are supported.')
        y = numpy.reshape(y, (-1, 1))
        N = numpy.size(y, 0)
        if numpy.size(x, 0) != N:
            if numpy.size(x, 1) != N:
                raise AttributeError('Number of inputs (x) and targets (y) must be the same.')
            x = x.T  # accept transposed inputs
        D = numpy.size(x, 1)
        if not init and D != self.D:
            raise AttributeError('Input data (x) dimension disagree with hyperparameters.')
        return (x, y, N, D)

    def __setHyp(self, hyp):
        # Normalize hyperparameters into the {'mean','cov','lik'} dict form.
        # NOTE: D may be referenced by the eval'd count expressions below.
        D = self.D
        ms = eval(mean.feval(self.meanf))
        cs = eval(cov.feval(self.covf))
        ls = eval(lik.feval(self.likf))
        if hyp is None:
            hyp = {'mean': numpy.zeros((ms, 1)),
                   'cov': numpy.zeros((cs, 1)),
                   'lik': numpy.zeros((ls, 1))}
        elif isinstance(hyp, dict):
            if 'mean' not in hyp:
                hyp['mean'] = numpy.array([[]])
            if ms != numpy.size(hyp['mean']):
                raise AttributeError('Number of mean function hyperparameters disagree with mean function.')
            if 'cov' not in hyp:
                hyp['cov'] = numpy.array([[]])
            if cs != numpy.size(hyp['cov']):
                raise AttributeError('Number of cov function hyperparameters disagree with cov function.')
            if 'lik' not in hyp:
                hyp['lik'] = numpy.array([[]])
            if ls != numpy.size(hyp['lik']):
                raise AttributeError('Number of lik function hyperparameters disagree with lik function.')
        elif isinstance(hyp, numpy.ndarray):
            hyp = numpy.reshape(hyp, (-1, 1))
            if ms + cs + ls != numpy.size(hyp, 0):
                raise AttributeError('Number of hyperparameters disagree with functions.')
            # BUGFIX: cumulative offsets (the original sliced
            # range(ms, cs) / range(cs, cs + ls)).
            hyp = {'mean': hyp[0:ms],
                   'cov': hyp[ms:ms + cs],
                   'lik': hyp[ms + cs:ms + cs + ls]}
        else:
            raise AttributeError('Unsupported type of hyperparameters.')
        return hyp

    def __setFunc(self, f, ftype, lower=False):
        # Resolve a function specification (name, callable, or tuple of
        # those for composite functions) to the implementing callables.
        if ftype == 'inference':
            fs = self._inference_functions
            m = 'sklearn.gpml.inf'
        elif ftype == 'mean':
            fs = self._mean_functions
            m = 'sklearn.gpml.mean'
        elif ftype == 'covariance':
            fs = self._covariance_functions
            m = 'sklearn.gpml.cov'
        elif ftype == 'likelihood':
            # BUGFIX: the original looked likelihood names up in
            # ``_covariance_functions``.
            fs = self._likelihood_functions
            m = 'sklearn.gpml.lik'
        else:
            raise AttributeError('Unknown function type.')
        if not lower and not isinstance(f, tuple):
            f = (f,)
        resf = ()
        for fp in f:
            if isinstance(fp, str):
                if fp.lower() in fs:
                    fp = fs[fp.lower()]
                else:
                    raise AttributeError('Unknown %s function.' % ftype)
            elif isinstance(fp, tuple) and ftype != 'inference':
                # nested specification for composite functions
                fp = self.__setFunc(fp, ftype, True)
            elif hasattr(fp, '__call__'):
                if fp.__module__ != m:
                    raise AttributeError('%s function not from %s module.' % (ftype.capitalize(), m))
            else:
                raise AttributeError('Unknown %s function type.' % ftype)
            resf = resf + (fp,)
        if ftype == 'inference':
            # inference is always a single function, not a composite tuple
            return fp
        return resf
|
bsd-3-clause
|
hugobowne/scikit-learn
|
sklearn/manifold/tests/test_mds.py
|
324
|
1862
|
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
    # Metric SMACOF on the example from "Modern Multidimensional Scaling",
    # Borg & Groenen, p. 154.
    dissim = np.array([[0, 5, 3, 4],
                       [5, 0, 2, 2],
                       [3, 2, 0, 1],
                       [4, 2, 1, 0]])
    start = np.array([[-.266, -.539],
                      [.451, .252],
                      [.016, -.238],
                      [-.200, .524]])
    expected = np.array([[-1.415, -2.471],
                         [1.633, 1.107],
                         [.249, -.067],
                         [-.468, 1.431]])
    embedding, _ = mds.smacof(dissim, init=start, n_components=2, max_iter=1,
                              n_init=1)
    assert_array_almost_equal(embedding, expected, decimal=3)
def test_smacof_error():
    # A non-symmetric similarity matrix must be rejected.
    sim_asym = np.array([[0, 5, 9, 4],
                         [5, 0, 2, 2],
                         [3, 2, 0, 1],
                         [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, sim_asym)
    # A non-square similarity matrix must be rejected.
    sim_rect = np.array([[0, 5, 9, 4],
                         [5, 0, 2, 2],
                         [4, 2, 1, 0]])
    assert_raises(ValueError, mds.smacof, sim_rect)
    # An init array whose shape disagrees with the input must be rejected.
    sim_ok = np.array([[0, 5, 3, 4],
                       [5, 0, 2, 2],
                       [3, 2, 0, 1],
                       [4, 2, 1, 0]])
    bad_init = np.array([[-.266, -.539],
                         [.016, -.238],
                         [-.200, .524]])
    assert_raises(ValueError, mds.smacof, sim_ok, init=bad_init, n_init=1)
def test_MDS():
    # Smoke test: non-metric MDS fit on a precomputed dissimilarity matrix.
    dissim = np.array([[0, 5, 3, 4],
                       [5, 0, 2, 2],
                       [3, 2, 0, 1],
                       [4, 2, 1, 0]])
    model = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
    model.fit(dissim)
|
bsd-3-clause
|
joonro/PyTables
|
doc/sphinxext/docscrape_sphinx.py
|
12
|
7768
|
import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.iteritems():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
    def _str_references(self):
        # Render the References section, plus a LaTeX-only block of
        # citation links so the PDF build can point into its collected
        # bibliography.
        out = []
        if self['References']:
            out += self._str_header('References')
            # Normalize a single reference string into a one-element list.
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            # NOTE(review): string comparison of versions misorders e.g.
            # "0.10" < "0.6" -- acceptable here only because it merely
            # selects between two equivalent directives.
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex', '']
            else:
                out += ['.. latexonly::', '']
            items = []
            # Collect citation labels of the form ".. [label]".
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
    def __str__(self, indent=0, func_role="obj"):
        """Assemble the full Sphinx-flavoured docstring.

        The section order below is the rendered order; do not reorder.
        """
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Other Parameters',
                           'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        # Member listings are deliberately disabled here; re-enable to get
        # Attributes/Methods autosummary tables.
        #for param_list in ('Attributes', 'Methods'):
        #    out += self._str_member_list(param_list)
        out = self._str_indent(out, indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """Sphinx-flavoured docstring wrapper for plain functions."""
    def __init__(self, obj, doc=None, config=None):
        # Avoid the shared mutable-default-argument pitfall; an omitted
        # config still behaves exactly like the old ``config={}`` default.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """Sphinx-flavoured docstring wrapper for classes."""
    def __init__(self, obj, doc=None, func_doc=None, config=None):
        # Avoid the shared mutable-default-argument pitfall; an omitted
        # config still behaves exactly like the old ``config={}`` default.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): the ``func_doc`` argument is accepted but NOT
        # forwarded (ClassDoc receives func_doc=None, as in the original);
        # confirm against the upstream numpydoc behaviour before changing.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    """Sphinx-flavoured docstring wrapper for an arbitrary object whose
    docstring text is supplied explicitly."""
    def __init__(self, obj, doc=None, config=None):
        # Avoid the shared mutable-default-argument pitfall; an omitted
        # config still behaves exactly like the old ``config={}`` default.
        config = {} if config is None else config
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config=None):
    """Return the appropriate Sphinx doc wrapper for *obj*.

    Parameters
    ----------
    obj : object
        The object to document.
    what : str, optional
        One of 'class', 'module', 'function', 'object'; inferred from
        *obj* when omitted.
    doc : str, optional
        Explicit docstring text; fetched with ``pydoc.getdoc`` for plain
        objects when omitted.
    config : dict, optional
        Configuration passed through to the wrapper (e.g. 'use_plots').
    """
    # None default avoids the shared mutable-default-argument pitfall while
    # keeping the old ``config={}`` behaviour for callers that omit it.
    config = {} if config is None else config
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        if doc is None:
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
|
bsd-3-clause
|
smartscheduling/scikit-learn-categorical-tree
|
sklearn/covariance/tests/test_robust_covariance.py
|
213
|
3359
|
# Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
# Shared fixture: the iris data matrix and its dimensions.
X = datasets.load_iris().data
X_1d = X[:, 0]  # first feature only, for 1-D estimator tests
n_samples, n_features = X.shape
def test_mcd():
    # Tests the FastMCD algorithm implementation
    # Argument order below: n_samples, n_features, n_outliers,
    # tol_loc, tol_cov, tol_support (see launch_mcd_on_dataset).
    # Small data set
    # test without outliers (random independent normal data)
    launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
    # test with a contaminated data set (medium contamination)
    launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
    # test with a contaminated data set (strong contamination)
    launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
    # Medium data set
    launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
    # Large data set
    launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
    # 1D data set
    launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    # Fit MinCovDet on a standard-normal dataset with n_outliers planted
    # outliers, then check that:
    #   * squared error of the location estimate  < tol_loc
    #   * squared error of the covariance estimate < tol_cov
    #   * size of the estimated support           >= tol_support
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    # offsets of +/-5 per feature push outliers far from the bulk
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False
    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(H) >= tol_support)
    # mahalanobis() on the training data must reproduce the fitted dist_
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
    # Non-regression test for issue #1127: fitting X of shape (3, 1)
    # (i.e. n_support == n_samples) must not raise.
    rng = np.random.RandomState(0)
    data = rng.normal(size=(3, 1))
    MinCovDet().fit(data)
def test_outlier_detection():
    # EllipticEnvelope: unfitted estimators must raise NotFittedError, and
    # after fitting, decision values / Mahalanobis distances / score must
    # all be mutually consistent.
    rnd = np.random.RandomState(0)
    X = rnd.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    assert_raises(NotFittedError, clf.predict, X)
    assert_raises(NotFittedError, clf.decision_function, X)
    clf.fit(X)
    y_pred = clf.predict(X)
    # NOTE(review): decision_function(raw_values=...) is specific to this
    # sklearn version (removed in later releases) -- confirm on upgrade.
    decision = clf.decision_function(X, raw_values=True)
    decision_transformed = clf.decision_function(X, raw_values=False)
    # raw decision values are the Mahalanobis distances
    assert_array_almost_equal(
        decision, clf.mahalanobis(X))
    assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
    # accuracy against all-ones labels == fraction predicted as inliers
    assert_almost_equal(clf.score(X, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
    # outlier predictions (-1) must match negative transformed decisions
    assert(sum(y_pred == -1) == sum(decision_transformed < 0))
|
bsd-3-clause
|
nomadcube/scikit-learn
|
examples/linear_model/plot_ols.py
|
220
|
1940
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
# NOTE(review): the printed label says "Residual sum of squares", but
# np.mean of the squared residuals is the *mean* squared error.
print("Residual sum of squares: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
         linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
dingliumath/quant-econ
|
examples/perm_inc_figs.py
|
7
|
1538
|
"""
Plots consumption, income and debt for the simple infinite horizon LQ
permanent income model with Gaussian iid income.
"""
import random
import numpy as np
import matplotlib.pyplot as plt
# Model parameters for the LQ permanent income simulation.
r = 0.05
beta = 1 / (1 + r)  # discount factor implied by the interest rate
T = 60  # number of simulated periods
sigma = 0.15  # std. dev. of the iid income shocks
mu = 1  # mean (non-financial) income
def time_path():
    """Simulate one realization of the model.

    Returns the tuple (w, b, c): the shock path, the debt path and the
    consumption path, each of length T+1.  Uses the module-level
    parameters T, sigma, mu and beta.
    """
    w = np.random.randn(T+1)  # w_0, w_1, ..., w_T
    w[0] = 0
    b = np.zeros(T+1)
    for t in range(1, T+1):
        # NOTE(review): w[1:t] excludes the current shock w_t, so
        # b[1] == 0 -- confirm this matches the intended debt recursion.
        b[t] = w[1:t].sum()
    b = - sigma * b
    c = mu + (1 - beta) * (sigma * w - b)
    return w, b, c
# == Figure showing a typical realization == #
# ``if 1`` is a manual on/off toggle: this figure is currently enabled.
if 1:
    fig, ax = plt.subplots()
    p_args = {'lw': 2, 'alpha': 0.7}
    ax.grid()
    ax.set_xlabel(r'Time')
    # Legend is laid out above the axes, spanning the full width.
    bbox = (0., 1.02, 1., .102)
    legend_args = {'bbox_to_anchor': bbox, 'loc': 'upper left',
                   'mode': 'expand'}
    w, b, c = time_path()
    ax.plot(list(range(T+1)), mu + sigma * w, 'g-',
            label="non-financial income", **p_args)
    ax.plot(list(range(T+1)), c, 'k-', label="consumption", **p_args)
    ax.plot(list(range(T+1)), b, 'b-', label="debt", **p_args)
    ax.legend(ncol=3, **legend_args)
    plt.show()
# == Figure showing multiple consumption paths == #
# ``if 0`` disables this figure; flip to 1 to draw 250 consumption paths.
if 0:
    fig, ax = plt.subplots()
    p_args = {'lw': 0.8, 'alpha': 0.7}
    ax.grid()
    ax.set_xlabel(r'Time')
    ax.set_ylabel(r'Consumption')
    # NOTE(review): b_sum is initialized but never updated or plotted --
    # apparently dead code left from an earlier version.
    b_sum = np.zeros(T+1)
    for i in range(250):
        rcolor = random.choice(('c', 'g', 'b', 'k'))
        w, b, c = time_path()
        ax.plot(list(range(T+1)), c, color=rcolor, **p_args)
    plt.show()
|
bsd-3-clause
|
jat255/seaborn
|
setup.py
|
22
|
3623
|
#! /usr/bin/env python
#
# Copyright (C) 2012-2014 Michael Waskom <[email protected]>
import os
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
DESCRIPTION = "Seaborn: statistical data visualization"
LONG_DESCRIPTION = """\
Seaborn is a library for making attractive and informative statistical graphics in Python. It is built on top of matplotlib and tightly integrated with the PyData stack, including support for numpy and pandas data structures and statistical routines from scipy and statsmodels.
Some of the features that seaborn offers are
- Several built-in themes that improve on the default matplotlib aesthetics
- Tools for choosing color palettes to make beautiful plots that reveal patterns in your data
- Functions for visualizing univariate and bivariate distributions or for comparing them between subsets of data
- Tools that fit and visualize linear regression models for different kinds of independent and dependent variables
- Functions that visualize matrices of data and use clustering algorithms to discover structure in those matrices
- A function to plot statistical timeseries data with flexible estimation and representation of uncertainty around the estimate
- High-level abstractions for structuring grids of plots that let you easily build complex visualizations
"""
DISTNAME = 'seaborn'
MAINTAINER = 'Michael Waskom'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://stanford.edu/~mwaskom/software/seaborn/'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'https://github.com/mwaskom/seaborn/'
VERSION = '0.7.0.dev'
try:
from setuptools import setup
_has_setuptools = True
except ImportError:
from distutils.core import setup
def check_dependencies():
    """Return the list of missing hard dependencies.

    Each required package is imported; those that fail to import are
    returned so ``setup()`` can list them in ``install_requires``.  Only
    presence is checked, not versions.
    """
    import importlib
    install_requires = []
    # Just make sure dependencies exist, I haven't rigorously
    # tested what the minimal versions that will work are
    # (help on that would be awesome)
    # One loop replaces the original four copy-pasted try/except blocks.
    for package in ("numpy", "scipy", "matplotlib", "pandas"):
        try:
            importlib.import_module(package)
        except ImportError:
            install_requires.append(package)
    return install_requires
if __name__ == "__main__":
    # Only demand the scientific stack that is actually missing; packages
    # already importable are not re-listed in install_requires.
    install_requires = check_dependencies()
    setup(name=DISTNAME,
          author=MAINTAINER,
          author_email=MAINTAINER_EMAIL,
          maintainer=MAINTAINER,
          maintainer_email=MAINTAINER_EMAIL,
          description=DESCRIPTION,
          long_description=LONG_DESCRIPTION,
          license=LICENSE,
          url=URL,
          version=VERSION,
          download_url=DOWNLOAD_URL,
          install_requires=install_requires,
          packages=['seaborn', 'seaborn.external', 'seaborn.tests'],
          classifiers=[
              'Intended Audience :: Science/Research',
              'Programming Language :: Python :: 2.7',
              'Programming Language :: Python :: 3.3',
              'Programming Language :: Python :: 3.4',
              'License :: OSI Approved :: BSD License',
              'Topic :: Scientific/Engineering :: Visualization',
              'Topic :: Multimedia :: Graphics',
              'Operating System :: POSIX',
              'Operating System :: Unix',
              'Operating System :: MacOS'],
          )
|
bsd-3-clause
|
ChanderG/scikit-learn
|
doc/sphinxext/gen_rst.py
|
142
|
40026
|
"""
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
    """A minimal file-like object that duplicates every write to two
    underlying streams, like the Unix ``tee`` utility."""
    def __init__(self, file1, file2):
        self.file1 = file1
        self.file2 = file2
    def write(self, data):
        # Fan the payload out to both sinks.
        for sink in (self.file1, self.file2):
            sink.write(data)
    def flush(self):
        for sink in (self.file1, self.file2):
            sink.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
    """Helper function to get data over http or from a local file.

    ``http://`` URLs are fetched with urllib (Python 2 first, Python 3 on
    AttributeError) and transparently gunzipped when the server says so;
    any other string is treated as a local file path and read as text.
    """
    if url.startswith('http://'):
        # Try Python 2, use Python 3 on exception
        try:
            resp = urllib.urlopen(url)
            encoding = resp.headers.dict.get('content-encoding', 'plain')
        except AttributeError:
            resp = urllib.request.urlopen(url)
            encoding = resp.headers.get('content-encoding', 'plain')
        data = resp.read()
        if encoding == 'plain':
            pass
        elif encoding == 'gzip':
            data = StringIO(data)
            data = gzip.GzipFile(fileobj=data).read()
        else:
            raise RuntimeError('unknown encoding')
    else:
        # Local file.  The context manager closes the handle; the
        # original's explicit fid.close() inside the with-block was
        # redundant and has been removed.
        with open(url, 'r') as fid:
            data = fid.read()
    return data
# Cache downloads / file reads across doc builds in the _build directory.
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
    """Parse a Sphinx search index
    Parameters
    ----------
    searchindex : str
        The Sphinx search index (contents of searchindex.js)
    Returns
    -------
    filenames : list of str
        The file names parsed from the search index.
    objects : dict
        The objects parsed from the search index.
    """
    def _select_block(str_in, start_tag, end_tag):
        """Select first block delimited by start_tag and end_tag"""
        start_pos = str_in.find(start_tag)
        if start_pos < 0:
            raise ValueError('start_tag not found')
        depth = 0
        # Walk forward tracking nesting depth until the matching end_tag.
        for pos in range(start_pos, len(str_in)):
            if str_in[pos] == start_tag:
                depth += 1
            elif str_in[pos] == end_tag:
                depth -= 1
                if depth == 0:
                    break
        # Exclusive of the delimiters themselves.
        sel = str_in[start_pos + 1:pos]
        return sel
    def _parse_dict_recursive(dict_str):
        """Parse a dictionary from the search index"""
        # Hand-rolled parser for the JS-like `key:value,...` syntax used by
        # searchindex.js; values are either `[...]` lists or nested `{...}`
        # dicts.  Not JSON (keys may be unquoted), hence no json.loads.
        dict_out = dict()
        pos_last = 0
        pos = dict_str.find(':')
        while pos >= 0:
            key = dict_str[pos_last:pos]
            if dict_str[pos + 1] == '[':
                # value is a list
                pos_tmp = dict_str.find(']', pos + 1)
                if pos_tmp < 0:
                    raise RuntimeError('error when parsing dict')
                value = dict_str[pos + 2: pos_tmp].split(',')
                # try to convert elements to int
                for i in range(len(value)):
                    try:
                        value[i] = int(value[i])
                    except ValueError:
                        pass
            elif dict_str[pos + 1] == '{':
                # value is another dictionary
                subdict_str = _select_block(dict_str[pos:], '{', '}')
                value = _parse_dict_recursive(subdict_str)
                pos_tmp = pos + len(subdict_str)
            else:
                raise ValueError('error when parsing dict: unknown elem')
            key = key.strip('"')
            if len(key) > 0:
                dict_out[key] = value
            # Advance past the comma separating entries; stop at the end.
            pos_last = dict_str.find(',', pos_tmp)
            if pos_last < 0:
                break
            pos_last += 1
            pos = dict_str.find(':', pos_last)
        return dict_out
    # Make sure searchindex uses UTF-8 encoding
    if hasattr(searchindex, 'decode'):
        searchindex = searchindex.decode('UTF-8')
    # parse objects
    query = 'objects:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"objects:" not found in search index')
    sel = _select_block(searchindex[pos:], '{', '}')
    objects = _parse_dict_recursive(sel)
    # parse filenames
    query = 'filenames:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"filenames:" not found in search index')
    # Slice out the `[...]` list after "filenames:" and strip the quotes.
    filenames = searchindex[pos + len(query) + 1:]
    filenames = filenames[:filenames.find(']')]
    filenames = [f.strip('"') for f in filenames.split(',')]
    return filenames, objects
class SphinxDocLinkResolver(object):
    """ Resolve documentation links using searchindex.js generated by Sphinx
    Parameters
    ----------
    doc_url : str
        The base URL of the project website.
    searchindex : str
        Filename of searchindex, relative to doc_url.
    extra_modules_test : list of str
        List of extra module names to test.
    relative : bool
        Return relative links (only useful for links to documentation of this
        package).
    """
    def __init__(self, doc_url, searchindex='searchindex.js',
                 extra_modules_test=None, relative=False):
        self.doc_url = doc_url
        self.relative = relative
        # Caches: resolved links per object, fetched HTML per page.
        self._link_cache = {}
        self.extra_modules_test = extra_modules_test
        self._page_cache = {}
        if doc_url.startswith('http://'):
            if relative:
                raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://)"')
            searchindex_url = doc_url + '/' + searchindex
        else:
            searchindex_url = os.path.join(doc_url, searchindex)
        # detect if we are using relative links on a Windows system
        if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
            if not relative:
                raise ValueError('You have to use relative=True for the local'
                                 ' package on a Windows system.')
            self._is_windows = True
        else:
            self._is_windows = False
        # download and initialize the search index
        sindex = get_data(searchindex_url)
        filenames, objects = parse_sphinx_searchindex(sindex)
        self._searchindex = dict(filenames=filenames, objects=objects)
    def _get_link(self, cobj):
        """Get a valid link, False if not found"""
        fname_idx = None
        full_name = cobj['module_short'] + '.' + cobj['name']
        # Look the object up in the search index, first by full dotted
        # name, then under its short module's entry.
        if full_name in self._searchindex['objects']:
            value = self._searchindex['objects'][full_name]
            if isinstance(value, dict):
                value = value[next(iter(value.keys()))]
            fname_idx = value[0]
        elif cobj['module_short'] in self._searchindex['objects']:
            value = self._searchindex['objects'][cobj['module_short']]
            if cobj['name'] in value.keys():
                fname_idx = value[cobj['name']][0]
        if fname_idx is not None:
            fname = self._searchindex['filenames'][fname_idx] + '.html'
            if self._is_windows:
                fname = fname.replace('/', '\\')
                link = os.path.join(self.doc_url, fname)
            else:
                link = posixpath.join(self.doc_url, fname)
            if hasattr(link, 'decode'):
                link = link.decode('utf-8', 'replace')
            if link in self._page_cache:
                html = self._page_cache[link]
            else:
                html = get_data(link)
                self._page_cache[link] = html
            # test if cobj appears in page
            comb_names = [cobj['module_short'] + '.' + cobj['name']]
            if self.extra_modules_test is not None:
                for mod in self.extra_modules_test:
                    comb_names.append(mod + '.' + cobj['name'])
            # url stays False unless one of the candidate names is found
            # in the fetched page.
            url = False
            if hasattr(html, 'decode'):
                # Decode bytes under Python 3
                html = html.decode('utf-8', 'replace')
            for comb_name in comb_names:
                if hasattr(comb_name, 'decode'):
                    # Decode bytes under Python 3
                    comb_name = comb_name.decode('utf-8', 'replace')
                if comb_name in html:
                    url = link + u'#' + comb_name
            link = url
        else:
            link = False
        return link
    def resolve(self, cobj, this_url):
        """Resolve the link to the documentation, returns None if not found
        Parameters
        ----------
        cobj : dict
            Dict with information about the "code object" for which we are
            resolving a link.
            cobj['name'] : function or class name (str)
            cobj['module_short'] : shortened module name (str)
            cobj['module'] : module name (str)
        this_url: str
            URL of the current page. Needed to construct relative URLs
            (only used if relative=True in constructor).
        Returns
        -------
        link : str | None
            The link (URL) to the documentation.
        """
        full_name = cobj['module_short'] + '.' + cobj['name']
        link = self._link_cache.get(full_name, None)
        if link is None:
            # we don't have it cached
            link = self._get_link(cobj)
            # cache it for the future
            self._link_cache[full_name] = link
        if link is False or link is None:
            # failed to resolve
            return None
        if self.relative:
            link = os.path.relpath(link, start=this_url)
            if self._is_windows:
                # replace '\' with '/' so it works on the web
                link = link.replace('\\', '/')
            # for some reason, the relative link goes one directory too high up
            link = link[3:]
        return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any

    Returns ``(docstring, first_par, end_row)``: the full docstring, the
    paragraph used as gallery snippet (second paragraph, truncated to 95
    chars, when *ignore_heading* is set; first paragraph otherwise), and
    the 1-based row after the docstring (shebang accounted for).
    """
    if six.PY2:
        lines = open(filename).readlines()
    else:
        lines = open(filename, encoding='utf-8').readlines()
    start_row = 0
    # Drop a shebang line so tokenize only sees Python source.
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            # The token is a literal, so eval() just yields its text.
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery.\n"
                                         "Please check the layout of your"
                                         " example file:\n {}\n and make sure"
                                         " it's correct".format(filename))
                else:
                    first_par = paragraphs[0]
            # Only the first string token (the module docstring) matters.
            break
    return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
    """ Generate the list of examples, as well as the contents of
    examples.

    Builds auto_examples/index.rst plus one rst page per example,
    recursing exactly one directory level below examples/.
    """
    root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
                                               'examples'))
    generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
                                                 'modules', 'generated'))
    # plot_gallery may arrive as the string 'True'/'False' from the build
    # command line, hence the eval; a real bool raises TypeError instead.
    try:
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        plot_gallery = bool(app.builder.config.plot_gallery)
    if not os.path.exists(example_dir):
        os.makedirs(example_dir)
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    if not os.path.exists(generated_dir):
        os.makedirs(generated_dir)
    # we create an index.rst with all examples
    fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
    # Note: The sidebar button has been removed from the examples page for now
    # due to how it messes up the layout. Will be fixed at a later point
    fhindex.write("""\
.. raw:: html
    <style type="text/css">
    div#sidebarbutton {
        /* hide the sidebar collapser, while ensuring vertical arrangement */
        display: none;
    }
    </style>
.. _examples-index:
Examples
========
""")
    # Here we don't use an os.walk, but we recurse only twice: flat is
    # better than nested.
    seen_backrefs = set()
    generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
    for directory in sorted(os.listdir(example_dir)):
        if os.path.isdir(os.path.join(example_dir, directory)):
            generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
    fhindex.flush()
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    # Returns (docstring_end_row, total_end_row), both adjusted for a
    # stripped shebang line, by tokenizing the example file.
    example_file = os.path.join(target_dir, filename)
    if six.PY2:
        lines = open(example_file).readlines()
    else:
        lines = open(example_file, encoding='utf-8').readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            # Only the first string token (the module docstring) is noted;
            # the loop keeps running so erow ends at the last token.
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
    """Sort example filenames by code length (docstring excluded).

    Parameters
    ----------
    file_list : list of str
        Candidate filenames; non-``.py`` entries are dropped.
    target_dir : str
        Directory containing the files (passed to extract_line_count).

    Returns
    -------
    list of str
        The ``.py`` filenames sorted by (code line count, name).
    """
    new_list = [x for x in file_list if x.endswith('.py')]
    if not new_list:
        # Early exit; also avoids lexsorting empty arrays below.
        return []
    unsorted = np.zeros(shape=(len(new_list), 2))
    # Plain builtins instead of np.object/np.str/np.float: those aliases
    # were deprecated in NumPy 1.20 and removed in 1.24.
    unsorted = unsorted.astype(object)
    for count, exmpl in enumerate(new_list):
        docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
        unsorted[count][1] = total_lines - docstr_lines
        unsorted[count][0] = exmpl
    # lexsort: primary key is the code length, filename breaks ties.
    index = np.lexsort((unsorted[:, 0].astype(str),
                        unsorted[:, 1].astype(float)))
    return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
    """Build the RST/HTML snippet placing one example thumbnail (image,
    target link and hover tooltip) in a gallery page."""
    stem = fname[:-3]  # filename without the '.py' suffix
    thumb = os.path.join(full_dir, 'images', 'thumb', stem + '.png')
    link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
    ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
    # Strip the '._' artefact produced when joining with '.'
    if ref_name.startswith('._'):
        ref_name = ref_name[2:]
    if link_name.startswith('._'):
        link_name = link_name[2:]
    if full_dir != '.':
        target = ' :target: ./%s/%s.html\n\n' % (full_dir, stem)
    else:
        target = ' :target: ./%s.html\n\n' % link_name[:-3]
    pieces = [
        """
.. raw:: html
    <div class="thumbnailContainer" tooltip="{}">
""".format(snippet),
        '.. figure:: %s\n' % thumb,
        target,
        """ :ref:`example_%s`
.. raw:: html
    </div>
""" % (ref_name),
    ]
    return ''.join(pieces)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
    """ Generate the rst file for an example directory.

    Writes the directory's README and a thumbnail + hidden toctree entry
    per example into *fhindex*, generates each example's rst page, and
    appends "Examples using X" cross-reference stubs for every backref.
    """
    if not directory == '.':
        target_dir = os.path.join(root_dir, directory)
        src_dir = os.path.join(example_dir, directory)
    else:
        target_dir = root_dir
        src_dir = example_dir
    if not os.path.exists(os.path.join(src_dir, 'README.txt')):
        raise ValueError('Example directory %s does not have a README.txt' %
                         src_dir)
    fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # Examples appear shortest-code-first in the gallery.
    sorted_listdir = line_count_sort(os.listdir(src_dir),
                                     src_dir)
    if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
        os.makedirs(os.path.join(directory, 'images', 'thumb'))
    for fname in sorted_listdir:
        if fname.endswith('py'):
            backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
            new_fname = os.path.join(src_dir, fname)
            _, snippet, _ = extract_docstring(new_fname, True)
            fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
            fhindex.write("""
.. toctree::
    :hidden:
    %s/%s
""" % (directory, fname[:-3]))
            # One ".examples" include file per documented object; created
            # on first sight ('w'), appended to afterwards ('a').
            for backref in backrefs:
                include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
                seen = backref in seen_backrefs
                with open(include_path, 'a' if seen else 'w') as ex_file:
                    if not seen:
                        # heading
                        print(file=ex_file)
                        print('Examples using ``%s``' % backref, file=ex_file)
                        print('-----------------%s--' % ('-' * len(backref)),
                              file=ex_file)
                        print(file=ex_file)
                    rel_dir = os.path.join('../../auto_examples', directory)
                    ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
                    seen_backrefs.add(backref)
    fhindex.write("""
.. raw:: html
    <div class="clearer"></div>
    """) # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
    """Make a thumbnail with the same aspect ratio centered in an
    image with a given width and height

    The source image is scaled to fit inside (width, height), pasted
    centered on a white canvas of exactly that size, and optionally
    recompressed with optipng.
    """
    # local import to avoid testing dependency on PIL:
    try:
        from PIL import Image
    except ImportError:
        import Image
    img = Image.open(in_fname)
    width_in, height_in = img.size
    scale_w = width / float(width_in)
    scale_h = height / float(height_in)
    # Pick the scale that keeps the whole image inside the box.
    if height_in * scale_w <= height:
        scale = scale_w
    else:
        scale = scale_h
    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))
    # resize the image
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
    # Image.LANCZOS there); kept as-is for the PIL version pinned here.
    img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
    # insert centered
    thumb = Image.new('RGB', (width, height), (255, 255, 255))
    pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
    thumb.paste(img, pos_insert)
    thumb.save(out_fname)
    # Use optipng to perform lossless compression on the resized image if
    # software is installed
    if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
        try:
            subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
        except Exception:
            warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
    """Return the shortest ancestor of *module_name* from which *obj_name*
    can still be imported (e.g. 'numpy' rather than 'numpy.core')."""
    parts = module_name.split('.')
    short_name = module_name
    # Try progressively shorter prefixes, longest first.
    for depth in range(len(parts) - 1, 0, -1):
        candidate = '.'.join(parts[:depth])
        try:
            exec('from %s import %s' % (candidate, obj_name))
        except ImportError:
            # This prefix no longer exposes the object; the previous
            # (one segment longer) prefix is the shortest working one.
            short_name = '.'.join(parts[:depth + 1])
            break
        short_name = candidate
    return short_name
class NameFinder(ast.NodeVisitor):
    """AST visitor that records imported aliases and attribute accesses so
    dotted uses (e.g. ``np.sum``) can be resolved back to their full module
    path (``numpy.sum``).

    Only names originating from imported modules are retained.
    """
    def __init__(self):
        super(NameFinder, self).__init__()
        self.imported_names = {}
        self.accessed_names = set()
    def visit_Import(self, node, prefix=''):
        # Map each local alias ('np') to the real module path ('numpy').
        for alias in node.names:
            self.imported_names[alias.asname or alias.name] = prefix + alias.name
    def visit_ImportFrom(self, node):
        # 'from a.b import c' behaves like 'import a.b.c' aliased to 'c'.
        self.visit_Import(node, node.module + '.')
    def visit_Name(self, node):
        self.accessed_names.add(node.id)
    def visit_Attribute(self, node):
        parts = []
        obj = node
        # Unwind the attribute chain down to its base expression.
        while isinstance(obj, ast.Attribute):
            parts.append(obj.attr)
            obj = obj.value
        if isinstance(obj, ast.Name):
            # Plain dotted access a.b (not e.g. a().b): record it whole.
            parts.append(obj.id)
            self.accessed_names.add('.'.join(reversed(parts)))
        else:
            # Something like a().b -- descend to find names inside 'a'.
            self.visit(obj)
    def get_mapping(self):
        """Yield (name_as_written, fully_qualified_name) pairs."""
        for name in self.accessed_names:
            root = name.split('.', 1)[0]
            if root in self.imported_names:
                # Join import path to relative path
                yield name, self.imported_names[root] + name[len(root):]
def identify_names(code):
    """Builds a codeobj summary by identifying and resolving used names

    >>> code = '''
    ... from a.b import c
    ... import d as e
    ... print(c)
    ... e.HelloWorld().f.g
    ... '''
    >>> for name, o in sorted(identify_names(code).items()):
    ...     print(name, o['name'], o['module'], o['module_short'])
    c c a.b a.b
    e.HelloWorld HelloWorld d d
    """
    finder = NameFinder()
    finder.visit(ast.parse(code))
    example_code_obj = {}
    for name, full_name in finder.get_mapping():
        # name is as written in file (e.g. np.asarray)
        # full_name includes resolved import path (e.g. numpy.asarray)
        module, attribute = full_name.rsplit('.', 1)
        # get shortened module name
        module_short = get_short_module_name(module, attribute)
        cobj = {'name': attribute, 'module': module,
                'module_short': module_short}
        example_code_obj[name] = cobj
    return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Copies the example source into ``target_dir``, optionally executes it
    to produce figure/stdout/timing caches, builds thumbnails, and writes
    the ``.rst`` page from the appropriate template.

    Returns the set of sklearn functions/classes imported in the example.
    """
    base_image_name = os.path.splitext(fname)[0]
    # '%%03d' keeps a literal %03d placeholder for the figure number
    image_fname = '%s_%%03d.png' % base_image_name
    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)
    # The following is a list containing all the figure names
    figure_list = []
    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    # caches for captured stdout and execution time, reused on rebuilds
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
    time_elapsed = 0
    if plot_gallery and fname.startswith('plot'):
        # generate the plot as png image if file name
        # starts with plot and if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())
        if not os.path.exists(first_image_file) or \
           os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
            # We need to execute the code
            print('plotting %s' % fname)
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                my_buffer = StringIO()
                # Tee so output is shown live AND captured for the rst page
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt}
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()
                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example, we
                    # don't wish to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip().expandtabs()
                if my_stdout:
                    stdout = '**Script output**::\n\n %s\n\n' % (
                        '\n '.join(my_stdout.split('\n')))
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)
                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
                for fig_mngr in fig_managers:
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    fig = plt.figure(fig_mngr.num)
                    kwargs = {}
                    to_rgba = matplotlib.colors.colorConverter.to_rgba
                    # only forward face/edge colors that differ from rcParams
                    for attr in ['facecolor', 'edgecolor']:
                        fig_attr = getattr(fig, 'get_' + attr)()
                        default_attr = matplotlib.rcParams['figure.' + attr]
                        if to_rgba(fig_attr) != to_rgba(default_attr):
                            kwargs[attr] = fig_attr
                    fig.savefig(image_path % fig_mngr.num, **kwargs)
                    figure_list.append(image_fname % fig_mngr.num)
            except:
                # NOTE(review): bare except deliberately swallows any example
                # failure so the rest of the gallery still builds
                print(80 * '_')
                print('%s is not compiling:' % fname)
                traceback.print_exc()
                print(80 * '_')
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout
            print(" - time elapsed : %.2g sec" % time_elapsed)
        else:
            # image cache is fresh: recover figure names from disk
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path.replace("%03d",
                                                '[0-9][0-9][0-9]'))]
            figure_list.sort()
        # generate thumb file
        this_template = plot_rst_template
        car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to _build/html/stable/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
        if not os.path.exists(car_thumb_path):
            os.makedirs(car_thumb_path)
        if os.path.exists(first_image_file):
            # We generate extra special thumbnails for the carousel
            carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
            first_img = image_fname % 1
            if first_img in carousel_thumbs:
                make_thumbnail((image_path % carousel_thumbs[first_img][0]),
                               carousel_tfile, carousel_thumbs[first_img][1], 190)
            make_thumbnail(first_image_file, thumb_file, 400, 280)
    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)
    docstring, short_desc, end_row = extract_docstring(example_file)
    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
    time_m, time_s = divmod(time_elapsed, 60)
    f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
    # locals() feeds the template: short_fname, docstring, image_list, ...
    f.write(this_template % locals())
    f.flush()
    # save variables so we can later add links to the documentation
    if six.PY2:
        example_code_obj = identify_names(open(example_file).read())
    else:
        example_code_obj = \
            identify_names(open(example_file, encoding='utf-8').read())
    if example_code_obj:
        codeobj_fname = example_file[:-3] + '_codeobj.pickle'
        with open(codeobj_fname, 'wb') as fid:
            pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
    backrefs = set('{module_short}.{name}'.format(**entry)
                   for entry in example_code_obj.values()
                   if entry['module'].startswith('sklearn'))
    return backrefs
def embed_code_links(app, exception):
    """Embed hyperlinks to documentation into example code"""
    if exception is not None:
        # build failed: nothing to post-process
        return
    print('Embedding documentation hyperlinks in examples..')
    if app.builder.name == 'latex':
        # Don't embed hyperlinks when a latex builder is used.
        return
    # Add resolvers for the packages for which we want to show links
    doc_resolvers = {}
    doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
                                                     relative=True)
    resolver_urls = {
        'matplotlib': 'http://matplotlib.org',
        'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
        'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
    }
    for this_module, url in resolver_urls.items():
        try:
            doc_resolvers[this_module] = SphinxDocLinkResolver(url)
        except HTTPError as e:
            # a failed resolver is simply skipped; its links are not embedded
            print("The following HTTP Error has occurred:\n")
            print(e.code)
        except URLError as e:
            print("\n...\n"
                  "Warning: Embedding the documentation hyperlinks requires "
                  "internet access.\nPlease check your network connection.\n"
                  "Unable to continue embedding `{0}` links due to a URL "
                  "Error:\n".format(this_module))
            print(e.args)
    example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
                                                    'auto_examples'))
    # patterns for replacement
    link_pattern = '<a href="%s">%s</a>'
    orig_pattern = '<span class="n">%s</span>'
    period = '<span class="o">.</span>'
    for dirpath, _, filenames in os.walk(html_example_dir):
        for fname in filenames:
            print('\tprocessing: %s' % fname)
            full_fname = os.path.join(html_example_dir, dirpath, fname)
            subpath = dirpath[len(html_example_dir) + 1:]
            # fname[:-5] strips the '.html' extension to find the pickle
            pickle_fname = os.path.join(example_dir, subpath,
                                        fname[:-5] + '_codeobj.pickle')
            if os.path.exists(pickle_fname):
                # we have a pickle file with the objects to embed links for
                with open(pickle_fname, 'rb') as fid:
                    example_code_obj = pickle.load(fid)
                fid.close()
                str_repl = {}
                # generate replacement strings with the links
                for name, cobj in example_code_obj.items():
                    this_module = cobj['module'].split('.')[0]
                    if this_module not in doc_resolvers:
                        continue
                    try:
                        link = doc_resolvers[this_module].resolve(cobj,
                                                                  full_fname)
                    except (HTTPError, URLError) as e:
                        # skip this name but keep processing the rest
                        print("The following error has occurred:\n")
                        print(repr(e))
                        continue
                    if link is not None:
                        # reproduce pygments' markup for a dotted name so the
                        # replacement matches the generated HTML exactly
                        parts = name.split('.')
                        name_html = period.join(orig_pattern % part
                                                for part in parts)
                        str_repl[name_html] = link_pattern % (link, name_html)
                # do the replacement in the html file
                # ensure greediness
                names = sorted(str_repl, key=len, reverse=True)
                expr = re.compile(r'(?<!\.)\b' +  # don't follow . or word
                                  '|'.join(re.escape(name)
                                           for name in names))
                def substitute_link(match):
                    return str_repl[match.group()]
                if len(str_repl) > 0:
                    with open(full_fname, 'rb') as fid:
                        lines_in = fid.readlines()
                    with open(full_fname, 'wb') as fid:
                        for line in lines_in:
                            line = line.decode('utf-8')
                            line = expr.sub(substitute_link, line)
                            fid.write(line.encode('utf-8'))
    print('[done]')
def setup(app):
    """Sphinx extension entry point: register the gallery build hooks."""
    # generate the example rst pages before the build starts
    app.connect('builder-inited', generate_example_rst)
    app.add_config_value('plot_gallery', True, 'html')
    # embed links after build is finished
    app.connect('build-finished', embed_code_links)
    # Sphinx hack: sphinx copies generated images to the build directory
    # each time the docs are made.  If the desired image name already
    # exists, it appends a digit to prevent overwrites.  The problem is,
    # the directory is never cleared.  This means that each time you build
    # the docs, the number of images in the directory grows.
    #
    # This question has been asked on the sphinx development list, but there
    # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
    #
    # The following is a hack that prevents this behavior by clearing the
    # image build directory each time the docs are built.  If sphinx
    # changes their layout between versions, this will not work (though
    # it should probably not cause a crash).  Tested successfully
    # on Sphinx 1.0.7
    build_image_dir = '_build/html/_images'
    if os.path.exists(build_image_dir):
        filelist = os.listdir(build_image_dir)
        for filename in filelist:
            if filename.endswith('png'):
                os.remove(os.path.join(build_image_dir, filename))
def setup_module():
    """No-op module fixture.

    HACK: its presence stops nosetests from picking up the ``setup()``
    sphinx entry point above as a test fixture.
    """
    return None
|
bsd-3-clause
|
rkmaddox/mne-python
|
examples/decoding/decoding_xdawn_eeg.py
|
6
|
4127
|
"""
.. _ex-xdawn-decoding:
============================
XDAWN Decoding From EEG data
============================
ERP decoding with Xdawn :footcite:`RivetEtAl2009,RivetEtAl2011`. For each event
type, a set of spatial Xdawn filters are trained and applied on the signal.
Channels are concatenated and rescaled to create features vectors that will be
fed into a logistic regression.
"""
# Authors: Alexandre Barachant <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from mne import io, pick_types, read_events, Epochs, EvokedArray
from mne.datasets import sample
from mne.preprocessing import Xdawn
from mne.decoding import Vectorizer
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2,
'Visual/Left': 3, 'Visual/Right': 4}
n_filter = 3
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')
events = read_events(event_fname)
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
verbose=False)
# Create classification pipeline
clf = make_pipeline(Xdawn(n_components=n_filter),
Vectorizer(),
MinMaxScaler(),
LogisticRegression(penalty='l1', solver='liblinear',
multi_class='auto'))
# Get the labels
labels = epochs.events[:, -1]
# Cross validator
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
# Do cross-validation
preds = np.empty(len(labels))
for train, test in cv.split(epochs, labels):
clf.fit(epochs[train], labels[train])
preds[test] = clf.predict(epochs[test])
# Classification report
target_names = ['aud_l', 'aud_r', 'vis_l', 'vis_r']
report = classification_report(labels, preds, target_names=target_names)
print(report)
# Normalized confusion matrix
cm = confusion_matrix(labels, preds)
cm_normalized = cm.astype(float) / cm.sum(axis=1)[:, np.newaxis]
# Plot confusion matrix
fig, ax = plt.subplots(1)
im = ax.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
ax.set(title='Normalized Confusion matrix')
fig.colorbar(im)
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
fig.tight_layout()
ax.set(ylabel='True label', xlabel='Predicted label')
###############################################################################
# The ``patterns_`` attribute of a fitted Xdawn instance (here from the last
# cross-validation fold) can be used for visualization.
fig, axes = plt.subplots(nrows=len(event_id), ncols=n_filter,
figsize=(n_filter, len(event_id) * 2))
fitted_xdawn = clf.steps[0][1]
tmp_info = epochs.info.copy()
tmp_info['sfreq'] = 1.
for ii, cur_class in enumerate(sorted(event_id)):
cur_patterns = fitted_xdawn.patterns_[cur_class]
pattern_evoked = EvokedArray(cur_patterns[:n_filter].T, tmp_info, tmin=0)
pattern_evoked.plot_topomap(
times=np.arange(n_filter),
time_format='Component %d' if ii == 0 else '', colorbar=False,
show_names=False, axes=axes[ii], show=False)
axes[ii, 0].set(ylabel=cur_class)
fig.tight_layout(h_pad=1.0, w_pad=1.0, pad=0.1)
###############################################################################
# References
# ----------
# .. footbibliography::
|
bsd-3-clause
|
HBNLdev/DataStore
|
db/avg.py
|
1
|
1489
|
import os
import h5py
import pandas as pd
from .file_handling import parse_filename
def add_avgh1paths(df):
    # TODO: not yet implemented -- presumably meant to add .avg.h1 file
    # paths as a column on `df`; confirm intended behavior before filling in.
    pass
class EmptyStackError(Exception):
    """Raised when every file in a requested stack is missing on disk."""

    def __init__(s):
        # Attach the message to the exception itself so ``str(e)`` and
        # tracebacks carry it (previously it was only printed), while
        # keeping the original console print for backward compatibility.
        msg = 'all files in the stack were missing'
        super(EmptyStackError, s).__init__(msg)
        print(msg)
class AVGH1Stack:
    ''' represent a list of .avg.h1's as a stacked array '''

    def __init__(s, path_lst):
        # path_lst: iterable of .avg.h1 file paths; missing files are
        # counted and skipped rather than raising per-file.
        s.init_df(path_lst)

    def init_df(s, path_lst):
        # Build one record per readable file and index the resulting frame.
        row_lst = []
        missing_count = 0
        for fp in path_lst:
            if os.path.exists(fp):
                avgh1 = AVGH1(fp)
                # NOTE(review): AVGH1 as defined below sets ``file_info``,
                # not ``info`` -- this attribute access looks like it would
                # raise AttributeError; confirm against the full class.
                row_lst.append(avgh1.info)
            else:
                missing_count += 1
        if row_lst:
            print(missing_count, 'files missing')
        else:
            # nothing readable at all: abort the stack
            raise EmptyStackError
        s.data_df = pd.DataFrame.from_records(row_lst)
        s.data_df.set_index(['ID', 'session', 'powertype', 'experiment', 'condition'], inplace=True)
        s.data_df.sort_index(inplace=True)
class AVGH1:
    ''' represents a single .avg.h1 file '''

    def __init__(s, filepath):
        # Only metadata is parsed here; the HDF5 file is opened in load().
        s.filepath = filepath
        s.filename = os.path.split(s.filepath)[1]
        s.file_info = parse_filename(s.filename)

    def load(s):
        # Open the HDF5 file; the handle stays open on s.loaded.
        s.loaded = h5py.File(s.filepath, 'r')
        # electrode labels are stored as bytes; decode to str
        s.electrodes = [st.decode() for st in list(s.loaded['file']['run']['run'])[0][-2]]
        # drop index 31 -- presumably a non-EEG/reference channel; confirm
        s.electrodes_61 = s.electrodes[0:31] + s.electrodes[32:62]
        # NOTE(review): sampling rate is hard-coded, not read from the
        # file -- verify it matches all inputs.
        s.samp_freq = 256
|
gpl-3.0
|
RachitKansal/scikit-learn
|
sklearn/tests/test_common.py
|
70
|
7717
|
"""
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance,
check_fit2d_predict1d,
check_fit1d_1sample)
def test_all_estimator_no_base_class():
    # ``all_estimators`` must filter out abstract base classes.
    for name, _ in all_estimators():
        msg = ("Base estimators such as {0} should not be included"
               " in all_estimators").format(name)
        assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
    # Every estimator (meta-estimators included) must be
    # default-constructible, clonable and have a working repr.
    estimators = all_estimators(include_meta_estimators=True)
    # Meta sanity-check: introspection should have found something.
    assert_greater(len(estimators), 0)
    for name, Estimator in estimators:
        # some can just not be sensibly default constructed
        yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
    # Generic input-validation checks for every public, non-bicluster,
    # non-meta estimator.
    for name, Estimator in all_estimators():
        if name.startswith("_") or issubclass(Estimator, BiclusterMixin):
            continue
        for check in _yield_all_checks(name, Estimator):
            yield check, name, Estimator
def test_configure():
    # Smoke test the 'configure' step of setup, this tests all the
    # 'configure' functions in the setup.pys in the scikit
    cwd = os.getcwd()
    setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
    setup_filename = os.path.join(setup_path, 'setup.py')
    # silently skip when running from an installed (non-source) tree
    if not os.path.exists(setup_filename):
        return
    try:
        os.chdir(setup_path)
        old_argv = sys.argv
        sys.argv = ['setup.py', 'config']
        clean_warning_registry()
        with warnings.catch_warnings():
            # The configuration spits out warnings when not finding
            # Blas/Atlas development headers
            warnings.simplefilter('ignore', UserWarning)
            if PY3:
                with open('setup.py') as f:
                    exec(f.read(), dict(__name__='__main__'))
            else:
                execfile('setup.py', dict(__name__='__main__'))
    finally:
        # always restore argv and the working directory
        sys.argv = old_argv
        os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
    # Check class_weight='balanced' for every linear classifier that
    # exposes a class_weight parameter.
    classifiers = all_estimators(type_filter='classifier')
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        # instantiating clazz() may emit warnings; record=True hides them
        linear_classifiers = [
            (name, clazz)
            for name, clazz in classifiers
            if 'class_weight' in clazz().get_params().keys()
            and issubclass(clazz, LinearClassifierMixin)]
    for name, Classifier in linear_classifiers:
        if name == "LogisticRegressionCV":
            # Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
            # CV folds and fit a model for each CV iteration before averaging
            # the coef. Therefore it is expected to not behave exactly as the
            # other linear model.
            continue
        yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
    # Every name advertised in a module's __all__ must actually resolve
    # in that module's namespace.
    walked = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
                                   onerror=lambda _: None)
    modnames = [found for _, found, _ in walked]
    modnames.append('sklearn')
    for modname in modnames:
        if ".tests." in modname:
            continue
        package = __import__(modname, fromlist="dummy")
        for name in getattr(package, '__all__', ()):
            if getattr(package, name, None) is None:
                raise AttributeError(
                    "Module '{0}' has no attribute '{1}'".format(
                        modname, name))
def test_root_import_all_completeness():
    # Every public top-level submodule must be listed in sklearn.__all__.
    EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
    walker = pkgutil.walk_packages(path=sklearn.__path__,
                                   onerror=lambda _: None)
    for _, modname, _ in walker:
        not_public = '.' in modname or modname.startswith('_')
        if not_public or modname in EXCEPTIONS:
            continue
        assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all estimators of type which are non-transformer
    # and which have an attribute of max_iter, return the attribute
    # of n_iter at least 1.
    for est_type in ['regressor', 'classifier', 'cluster']:
        regressors = all_estimators(type_filter=est_type)
        for name, Estimator in regressors:
            # LassoLars stops early for the default alpha=1.0 for
            # the iris dataset.
            if name == 'LassoLars':
                estimator = Estimator(alpha=0.)
            else:
                estimator = Estimator()
            if hasattr(estimator, "max_iter"):
                # These models are dependent on external solvers like
                # libsvm and accessing the iter parameter is non-trivial.
                if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
                             'RidgeClassifier', 'SVC', 'RandomizedLasso',
                             'LogisticRegressionCV']):
                    continue
                # Tested in test_transformer_n_iter below
                elif (name in CROSS_DECOMPOSITION or
                      name in ['LinearSVC', 'LogisticRegression']):
                    continue
                else:
                    # Multitask models related to ENet cannot handle
                    # if y is mono-output.
                    yield (check_non_transformer_estimators_n_iter,
                           name, estimator, 'Multi' in name)
def test_transformer_n_iter():
    # Transformers exposing ``max_iter`` should report an iteration count
    # after fitting, except those backed by external solvers where the
    # iteration parameter is not accessible.
    external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
                       'RandomizedLasso', 'LogisticRegressionCV']
    for name, Estimator in all_estimators(type_filter='transformer'):
        estimator = Estimator()
        if name in external_solver or not hasattr(estimator, "max_iter"):
            continue
        yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
    # get_params(deep=False) must be a subset of get_params(deep=True).
    # Related to issue #4465.
    candidates = all_estimators(include_meta_estimators=False,
                                include_other=True)
    for name, Estimator in candidates:
        if hasattr(Estimator, 'get_params'):
            yield check_get_params_invariance, name, Estimator
|
bsd-3-clause
|
wesm/statsmodels
|
scikits/statsmodels/sandbox/examples/ex_kaplan_meier.py
|
1
|
2730
|
#An example for the Kaplan-Meier estimator
# NOTE(review): legacy Python 2 example (print statements, old
# scikits.statsmodels namespace); it will not run under Python 3 as-is.
import scikits.statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
from scikits.statsmodels.sandbox.survival2 import KaplanMeier
#Getting the strike data as an array
dta = sm.datasets.strikes.load()
print 'basic data'
print '\n'
dta = dta.values()[-1]
print dta[range(5),:]
print '\n'
#Create the KaplanMeier object and fit the model
km = KaplanMeier(dta,0)
km.fit()
#show the results
km.plot()
print 'basic model'
print '\n'
km.summary()
print '\n'
#Multiple survival curves, one per value of the exogenous variable (column 1)
km2 = KaplanMeier(dta,0,exog=1)
km2.fit()
print 'more than one curve'
print '\n'
km2.summary()
print '\n'
km2.plot()
#with censoring: durations above 80 are treated as censored observations
censoring = np.ones_like(dta[:,0])
censoring[dta[:,0] > 80] = 0
dta = np.c_[dta,censoring]
print 'with censoring'
print '\n'
print dta[range(5),:]
print '\n'
km3 = KaplanMeier(dta,0,exog=1,censoring=2)
km3.fit()
km3.summary()
print '\n'
km3.plot()
#Test for difference of survival curves
log_rank = km3.test_diff([0.0645,-0.03957])
print 'log rank test'
print '\n'
print log_rank
print '\n'
#The zeroth element of log_rank is the chi-square test statistic
#for the difference between the survival curves for exog = 0.0645
#and exog = -0.03957, the index one element is the degrees of freedom for
#the test, and the index two element is the p-value for the test
wilcoxon = km3.test_diff([0.0645,-0.03957], rho=1)
print 'Wilcoxon'
print '\n'
print wilcoxon
print '\n'
#Same info as log_rank, but for Peto and Peto modification to the
#Gehan-Wilcoxon test
#User specified functions for tests
#A wider range of rates can be accessed by using the 'weight' parameter
#for the test_diff method
#For example, if the desired weights are S(t)*(1-S(t)), where S(t) is a pooled
#estimate for the survival function, this could be computed by doing
def weights(t):
    #must accept one argument, even though it is not used here
    s = KaplanMeier(dta,0,censoring=2)
    s.fit()
    s = s.results[0][0]
    s = s * (1 - s)
    return s
#KaplanMeier provides an array of times to the weighting function
#internally, so the weighting function must accept one argument
test = km3.test_diff([0.0645,-0.03957], weight=weights)
print 'user specified weights'
print '\n'
print test
print '\n'
#Groups with nan names
#These can be handled by passing the data to KaplanMeier as an array of strings
groups = np.ones_like(dta[:,1])
groups = groups.astype('S4')
groups[dta[:,1] > 0] = 'high'
groups[dta[:,1] <= 0] = 'low'
dta = dta.astype('S4')
dta[:,1] = groups
print 'with nan group names'
print '\n'
print dta[range(5),:]
print '\n'
km4 = KaplanMeier(dta,0,exog=1,censoring=2)
km4.fit()
km4.summary()
print '\n'
km4.plot()
#show all the plots
plt.show()
|
bsd-3-clause
|
eg-zhang/scikit-learn
|
sklearn/decomposition/tests/test_incremental_pca.py
|
297
|
8265
|
"""Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
    # Incremental PCA on dense arrays.
    X = iris.data
    batch_size = X.shape[0] // 3
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    pca = PCA(n_components=2)
    pca.fit_transform(X)
    X_transformed = ipca.fit_transform(X)
    np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
    # explained variance should roughly agree with batch PCA (1 decimal)
    assert_almost_equal(ipca.explained_variance_ratio_.sum(),
                        pca.explained_variance_ratio_.sum(), 1)
    for n_components in [1, 2, X.shape[1]]:
        ipca = IncrementalPCA(n_components, batch_size=batch_size)
        ipca.fit(X)
        cov = ipca.get_covariance()
        precision = ipca.get_precision()
        # covariance and precision must be (pseudo-)inverses of each other
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
    # A point lying on the dominant component should project, after
    # normalization, to a first coordinate of magnitude ~1.
    rng = np.random.RandomState(1999)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
    # Xt shares the "components" of X, just separated out; check that the
    # fitted model recreates it as expected.
    Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
    Yt /= np.sqrt((Yt ** 2).sum())  # normalize the projection
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
    # transform followed by inverse_transform should recover the data,
    # since the input is almost of rank n_components.
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
    recovered = ipca.inverse_transform(ipca.transform(X))
    assert_almost_equal(X, recovered, decimal=3)
def test_incremental_pca_validation():
    # n_components outside [1, n_features] (or non-integral) is rejected.
    X = [[0, 1], [1, 0]]
    for bad_n_components in (-1, 0, .99, 3):
        ipca = IncrementalPCA(bad_n_components, batch_size=10)
        assert_raises(ValueError, ipca.fit, X)
def test_incremental_pca_set_params():
    # Changing n_components via set_params between partial_fit calls must
    # raise; restoring the original setting allows fitting again.
    # (The previous comment, "components_ sign is stable over batch
    # sizes", described a different test.)
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 20
    X = rng.randn(n_samples, n_features)
    X2 = rng.randn(n_samples, n_features)
    X3 = rng.randn(n_samples, n_features)
    ipca = IncrementalPCA(n_components=20)
    ipca.fit(X)
    # Decreasing number of components
    ipca.set_params(n_components=10)
    assert_raises(ValueError, ipca.partial_fit, X2)
    # Increasing number of components
    ipca.set_params(n_components=15)
    assert_raises(ValueError, ipca.partial_fit, X3)
    # Returning to original setting
    ipca.set_params(n_components=20)
    ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Partially fitting on data with a different number of features than
    # the initial fit must raise a ValueError.
    rng = np.random.RandomState(1999)
    n_samples = 100
    first = rng.randn(n_samples, 20)
    wider = rng.randn(n_samples, 50)
    ipca = IncrementalPCA(n_components=None)
    ipca.fit(first)
    assert_raises(ValueError, ipca.partial_fit, wider)
def test_incremental_pca_batch_signs():
    # The signs of components_ must not depend on the batch size.
    rng = np.random.RandomState(1999)
    X = rng.randn(100, 3)
    components_per_batch = [
        IncrementalPCA(n_components=None, batch_size=bs).fit(X).components_
        for bs in np.arange(10, 20)]
    for prev, cur in zip(components_per_batch[:-1], components_per_batch[1:]):
        assert_almost_equal(np.sign(prev), np.sign(cur), decimal=6)
def test_incremental_pca_batch_values():
    # The values of components_ must be stable across batch sizes
    # (to a coarse tolerance).
    rng = np.random.RandomState(1999)
    X = rng.randn(100, 3)
    components_per_batch = [
        IncrementalPCA(n_components=None, batch_size=bs).fit(X).components_
        for bs in np.arange(20, 40, 3)]
    for prev, cur in zip(components_per_batch[:-1], components_per_batch[1:]):
        assert_almost_equal(prev, cur, decimal=1)
def test_incremental_pca_partial_fit():
    # Test that fit and partial_fit get equivalent results.
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    batch_size = 10
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
    pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    # Add one to make sure endpoint is included
    batch_itr = np.arange(0, n + 1, batch_size)
    for i, j in zip(batch_itr[:-1], batch_itr[1:]):
        # feed the identical batches manually via partial_fit
        pipca.partial_fit(X[i:j, :])
    assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
    # On iris, IncrementalPCA should match PCA up to a sign flip.
    X = iris.data
    pca_proj = PCA(n_components=2).fit_transform(X)
    ipca_proj = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
    assert_almost_equal(np.abs(pca_proj), np.abs(ipca_proj), 1)
def test_incremental_pca_against_pca_random_data():
    # On shifted random data, IncrementalPCA should match PCA up to a
    # sign flip.
    rng = np.random.RandomState(1999)
    n_samples, n_features = 100, 3
    X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
    pca_proj = PCA(n_components=3).fit_transform(X)
    ipca_proj = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
    assert_almost_equal(np.abs(pca_proj), np.abs(ipca_proj), 1)
def test_explained_variances():
    # PCA and IncrementalPCA must report matching variance statistics.
    X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
                                      effective_rank=10, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    for nc in [None, 99]:
        pca = PCA(n_components=nc).fit(X)
        ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
        # Check all three variance attributes in one loop.
        for attr in ('explained_variance_', 'explained_variance_ratio_',
                     'noise_variance_'):
            assert_almost_equal(getattr(pca, attr), getattr(ipca, attr),
                                decimal=prec)
def test_whitening():
    # Whitened PCA and IncrementalPCA transforms must match up to sign flip.
    X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
                                      effective_rank=2, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    for nc in [None, 9]:
        pca = PCA(whiten=True, n_components=nc).fit(X)
        ipca = IncrementalPCA(whiten=True, n_components=nc,
                              batch_size=250).fit(X)
        transformed_pca = pca.transform(X)
        transformed_ipca = ipca.transform(X)
        assert_almost_equal(np.abs(transformed_pca), np.abs(transformed_ipca),
                            decimal=prec)
        # Round-tripping through inverse_transform must recover X for both,
        # and both reconstructions must agree with each other.
        recovered_ipca = ipca.inverse_transform(transformed_ipca)
        recovered_pca = pca.inverse_transform(transformed_pca)
        assert_almost_equal(X, recovered_ipca, decimal=prec)
        assert_almost_equal(X, recovered_pca, decimal=prec)
        assert_almost_equal(recovered_pca, recovered_ipca, decimal=prec)
|
bsd-3-clause
|
zorojean/scikit-learn
|
sklearn/preprocessing/__init__.py
|
268
|
1319
|
"""
The :mod:`sklearn.preprocessing` module includes scaling, centering,
normalization, binarization and imputation methods.
"""
from ._function_transformer import FunctionTransformer
from .data import Binarizer
from .data import KernelCenterer
from .data import MinMaxScaler
from .data import MaxAbsScaler
from .data import Normalizer
from .data import RobustScaler
from .data import StandardScaler
from .data import add_dummy_feature
from .data import binarize
from .data import normalize
from .data import scale
from .data import robust_scale
from .data import maxabs_scale
from .data import minmax_scale
from .data import OneHotEncoder
from .data import PolynomialFeatures
from .label import label_binarize
from .label import LabelBinarizer
from .label import LabelEncoder
from .label import MultiLabelBinarizer
from .imputation import Imputer
# Public names re-exported by `from sklearn.preprocessing import *`.
# Every name imported above is listed here; the list is intentionally not
# alphabetized (classes first, then functions, matching historical order).
__all__ = [
    'Binarizer',
    'FunctionTransformer',
    'Imputer',
    'KernelCenterer',
    'LabelBinarizer',
    'LabelEncoder',
    'MultiLabelBinarizer',
    'MinMaxScaler',
    'MaxAbsScaler',
    'Normalizer',
    'OneHotEncoder',
    'RobustScaler',
    'StandardScaler',
    'add_dummy_feature',
    'PolynomialFeatures',
    'binarize',
    'normalize',
    'scale',
    'robust_scale',
    'maxabs_scale',
    'minmax_scale',
    'label_binarize',
]
|
bsd-3-clause
|
karstenw/nodebox-pyobjc
|
examples/Extended Application/matplotlib/examples/userdemo/annotate_simple03.py
|
1
|
1365
|
"""
=================
Annotate Simple03
=================
"""
import matplotlib.pyplot as plt
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
fig, ax = plt.subplots(figsize=(3, 3))
ann = ax.annotate("Test",
xy=(0.2, 0.2), xycoords='data',
xytext=(0.8, 0.8), textcoords='data',
size=20, va="center", ha="center",
bbox=dict(boxstyle="round4", fc="w"),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3,rad=-0.2",
fc="w"),
)
pltshow(plt)
|
mit
|
juanshishido/project-eta
|
code/utils/searchlight.py
|
3
|
5199
|
from __future__ import print_function, division
from itertools import product
import numpy as np
import pandas as pd
def inrange(arr, b, f, ind):
    """Clamp a backward/forward index pair to the valid range of an axis.

    Parameters
    ----------
    arr: np.ndarray
        2d or 3d input array
    b: int
        The backward adjusted value
    f: int
        The forward adjusted value
    ind: int
        The index of `arr` to consider

    Returns:
    b, f: tuple
        The corrected slice values, clamped to [0, arr.shape[ind] - 1]
    """
    upper = arr.shape[ind] - 1
    b = max(b, 0)
    f = min(f, upper)
    return b, f
def sphere(arr, c, r=1):
    """Select the cube of voxels within reach `r` of center `c`, across time.

    Parameters
    ----------
    arr: np.ndarray
        4d input array
    c: tuple (x, y, z)
        The center voxel from which the volume is created
    r: int
        The radius for the selection

    Returns
    -------
    adjacent: np.ndarray
        The sub-volume around `c` (clipped at the array edges) for every
        time slice.
    """
    # checking type
    assert isinstance(arr, np.ndarray), 'input array must be type np.ndarray'
    assert isinstance(c, tuple), 'center must be type tuple'
    assert isinstance(r, int), 'radius must be type int'
    # checking size
    assert len(arr.shape) == 4, 'array must be 4-dimensional'
    assert len(c) == 3, 'tuple length must be 3'
    assert len(c) == len(arr.shape)-1, 'tuple length must equal array dim'
    assert r < min(arr.shape[:-1]), 'radius must be smaller than the '+\
        'smallest dimension of the first three axes'
    # checking valid `c` for `arr`; IndexError raised if not
    arr[c]
    # Build one clamped slice per spatial axis (same clamping inrange does).
    bounds = []
    for axis, center in enumerate(c):
        low = max(center - r, 0)
        high = min(center + r, arr.shape[axis] - 1)
        bounds.append(slice(low, high + 1))
    return arr[bounds[0], bounds[1], bounds[2]]
def nonzero_indices(arr):
    """Get the unique 3d indices for `arr` (4d) where the value is nonzero.

    Parameters
    ----------
    arr : np.ndarray
        The image data returned from calling `nib.load(f).get_data()`

    Returns
    -------
    nonzeros : np.ndarray
        2d integer array whose rows are unique (x, y, z) indices, in
        lexicographic order.  (The original docstring said "list"; the
        function has always returned an ndarray.)

    Examples
    --------
    >>> np.random.seed(42)
    >>> X = np.random.randn(16).reshape(2, 2, 2, 2)
    >>> Y = np.ones((2, 2, 2, 2))
    >>> Z = np.round(X - Y).astype(int)
    >>> nonzero_indices(Z)[0]
    array([0, 0, 0])
    >>> nonzero_indices(Z)[4]
    array([1, 0, 0])
    """
    # np.nonzero walks the array in C (row-major) order, so after dropping
    # the time axis the index rows are already lexicographically sorted;
    # np.unique(axis=0) therefore deduplicates them without changing the
    # ordering the previous pandas drop_duplicates() implementation gave.
    spatial = np.transpose(np.nonzero(arr)[:-1])
    nonzeros = np.unique(spatial, axis=0)
    return nonzeros
if __name__ == '__main__':
    # Run the docstring examples above as this module's self-test.
    import doctest
    doctest.testmod()
|
bsd-3-clause
|
tp199911/PyTables
|
doc/sphinxext/docscrape_sphinx.py
|
12
|
7768
|
import re, inspect, textwrap, pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
    """Render a parsed NumPy-format docstring as Sphinx reStructuredText.

    Each ``_str_*`` helper returns a list of output lines; ``__str__``
    concatenates the sections in a fixed order and joins them with
    newlines.
    """
    def __init__(self, docstring, config={}):
        # NOTE(review): mutable default `config={}` — harmless here because
        # the dict is only read, never mutated.
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)
    # string conversion routines
    def _str_header(self, name, symbol='`'):
        # `symbol` is accepted for interface compatibility but unused.
        return ['.. rubric:: ' + name, '']
    def _str_field_list(self, name):
        return [':' + name + ':']
    def _str_indent(self, doc, indent=4):
        # Prefix every line of `doc` with `indent` spaces.
        out = []
        for line in doc:
            out += [' '*indent + line]
        return out
    def _str_signature(self):
        # NOTE(review): the immediate `return ['']` makes the code below
        # unreachable — signature rendering appears deliberately disabled;
        # confirm intent before re-enabling.
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']
    def _str_summary(self):
        return self['Summary'] + ['']
    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']
    def _str_param_list(self, name):
        # Render a Parameters/Returns/... section as a Sphinx field list.
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc, 8)
                out += ['']
        return out
    @property
    def _obj(self):
        # The documented object: prefer the class, fall back to the function.
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None
    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')
            if prefix:
                prefix = '~%s.' % prefix
            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                # Members that exist on the object go in the autosummary
                # table; unknown ones fall back to a plain rst table.
                if not self._obj or hasattr(self._obj, param):
                    autosum += ["  %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))
            if autosum:
                out += ['.. autosummary::', '  :toctree:', '']
                out += autosum
            if others:
                maxlen_0 = max([len(x[0]) for x in others])
                maxlen_1 = max([len(x[1]) for x in others])
                hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
                fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
                n_indent = maxlen_0 + maxlen_1 + 4
                out += [hdr]
                for param, param_type, desc in others:
                    out += [fmt % (param.strip(), param_type)]
                    out += self._str_indent(desc, n_indent)
                out += [hdr]
            out += ['']
        return out
    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out
    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            # Reuse the base rendering, minus its two header lines.
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            out += self._str_indent(see_also[2:])
        return out
    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out
    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out
        out += ['.. index:: %s' % idx.get('default', '')]
        # NOTE(review): dict.iteritems() is Python 2 only — this module
        # predates a py3 port.
        for section, references in idx.iteritems():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += ['   single: %s' % (', '.join(references))]
            else:
                out += ['   %s: %s' % (section, ','.join(references))]
        return out
    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            # NOTE(review): lexicographic string compare of versions —
            # e.g. "0.10" < "0.6" — confirm against supported sphinx range.
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex', '']
            else:
                out += ['.. latexonly::', '']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out
    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])
        # Wrap the Examples section in a plot:: directive when requested
        # and the example looks like matplotlib code without one already.
        if (self.use_plots and 'import matplotlib' in examples_str
                and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')
    def __str__(self, indent=0, func_role="obj"):
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Other Parameters',
                           'Raises', 'Warns'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        #for param_list in ('Attributes', 'Methods'):
        #    out += self._str_member_list(param_list)
        out = self._str_indent(out, indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """Sphinx-flavoured rendering of a function/method docstring."""
    def __init__(self, obj, doc=None, config=None):
        # Avoid the shared mutable default argument; `config=None` means
        # "no options", exactly as the previous `config={}` default did.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """Sphinx-flavoured rendering of a class docstring."""
    def __init__(self, obj, doc=None, func_doc=None, config=None):
        # Avoid the shared mutable default argument; behaviour is unchanged.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): the incoming `func_doc` argument is ignored and
        # ClassDoc always receives func_doc=None — preserved from the
        # original; confirm upstream before wiring it through.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    """Sphinx-flavoured rendering of an arbitrary object's docstring."""
    def __init__(self, obj, doc=None, config=None):
        # Avoid the shared mutable default argument; behaviour is unchanged.
        config = {} if config is None else config
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config=None):
    """Build the appropriate Sphinx docstring wrapper for `obj`.

    Parameters
    ----------
    obj : object
        Module, class, function/method, or arbitrary object to document.
    what : str, optional
        One of 'class', 'module', 'function', 'object'; inferred from
        `obj` when omitted.
    doc : str, optional
        Docstring to use instead of `pydoc.getdoc(obj)` (objects only).
    config : dict, optional
        Options forwarded to the docstring classes (e.g. 'use_plots').

    Returns
    -------
    SphinxDocString
        A SphinxClassDoc, SphinxFunctionDoc, or SphinxObjDoc instance.
    """
    # Avoid the shared mutable default argument; behaviour is unchanged.
    config = {} if config is None else config
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        if doc is None:
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
|
bsd-3-clause
|
mrcslws/htmresearch
|
projects/sequence_prediction/continuous_sequence/run_tm_model.py
|
3
|
16411
|
## ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import importlib
from optparse import OptionParser
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.predictionmetricsmanager import MetricsManager
from nupic.frameworks.opf import metrics
# from htmresearch.frameworks.opf.clamodel_custom import CLAModel_custom
import nupic_output
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import yaml
from htmresearch.support.sequence_learning_utils import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
rcParams['pdf.fonttype'] = 42
plt.ion()
DATA_DIR = "./data"
MODEL_PARAMS_DIR = "./model_params"
def getMetricSpecs(predictedField, stepsAhead=5):
  """Build the OPF metric specs (negLL and NRMSE) for the predicted field."""
  shared = {'window': 1000, 'steps': stepsAhead}
  specs = tuple(
      MetricSpec(field=predictedField, metric='multiStep',
                 inferenceElement='multiStepBestPredictions',
                 params=dict(shared, errorMetric=errorMetric))
      for errorMetric in ('negativeLogLikelihood', 'nrmse'))
  return specs
def createModel(modelParams):
  """Create an OPF model from `modelParams` with inference enabled.

  NOTE(review): relies on the module-level global `predictedField`
  (assigned in the __main__ block below) — confirm before calling this
  from another module.
  """
  model = ModelFactory.create(modelParams)
  model.enableInference({"predictedField": predictedField})
  return model
def getModelParamsFromName(dataSet):
  """Load the stored model-parameter dict for a supported dataset name.

  Parameters
  ----------
  dataSet : str
    Dataset name; only the nyc_taxi variants have stored parameters.

  Returns
  -------
  dict
    Parsed YAML model parameters.

  Raises
  ------
  Exception
    If no parameter file exists for `dataSet`.
  """
  if (dataSet == "nyc_taxi" or
      dataSet == "nyc_taxi_perturb" or
      dataSet == "nyc_taxi_perturb_baseline"):
    # Use a context manager so the file handle is closed promptly; the
    # original open() call leaked the handle.
    with open('model_params/nyc_taxi_model_params.yaml') as paramFile:
      importedModelParams = yaml.safe_load(paramFile)
  else:
    raise Exception("No model params exist for {}".format(dataSet))
  return importedModelParams
def _getArgs():
  """Parse command-line options (dataset, plotting, horizon, classifier).

  Returns the (options, remainder) pair from OptionParser.
  """
  parser = OptionParser(usage="%prog PARAMS_DIR OUTPUT_DIR [options]"
                              "\n\nCompare TM performance with trivial predictor using "
                              "model outputs in prediction directory "
                              "and outputting results to result directory.")
  parser.add_option("-d",
                    "--dataSet",
                    type=str,
                    default='nyc_taxi',
                    dest="dataSet",
                    help="DataSet Name, choose from rec-center-hourly, nyc_taxi")
  parser.add_option("-p",
                    "--plot",
                    default=False,
                    dest="plot",
                    help="Set to True to plot result")
  parser.add_option("--stepsAhead",
                    help="How many steps ahead to predict. [default: %default]",
                    default=5,
                    type=int)
  parser.add_option("-c",
                    "--classifier",
                    type=str,
                    default='SDRClassifierRegion',
                    dest="classifier",
                    help="Classifier Type: SDRClassifierRegion or CLAClassifierRegion")
  (options, remainder) = parser.parse_args()
  # Python 2 print statement: echo the parsed options for the run log.
  print options
  return options, remainder
def getInputRecord(df, predictedField, i):
  """Build the model input dict for row `i` of `df`.

  Returns the predicted field plus the two time-encoder fields
  ("timeofday", "dayofweek"), all cast to float.
  """
  record = {"timeofday": float(df["timeofday"][i]),
            "dayofweek": float(df["dayofweek"][i])}
  record[predictedField] = float(df[predictedField][i])
  return record
def printTPRegionParams(tpregion):
  """
  Print the temporal-memory hyperparameters of a TPRegion to stdout.

  Note: assumes we are using TemporalMemory/TPShim in the TPRegion
  """
  # Underlying TemporalMemory instance (private NuPIC attribute).
  tm = tpregion.getSelf()._tfdr
  print "------------PY  TemporalMemory Parameters ------------------"
  print "numberOfCols             =", tm.getColumnDimensions()
  print "cellsPerColumn           =", tm.getCellsPerColumn()
  print "minThreshold             =", tm.getMinThreshold()
  print "activationThreshold      =", tm.getActivationThreshold()
  print "newSynapseCount          =", tm.getMaxNewSynapseCount()
  print "initialPerm              =", tm.getInitialPermanence()
  print "connectedPerm            =", tm.getConnectedPermanence()
  print "permanenceInc            =", tm.getPermanenceIncrement()
  print "permanenceDec            =", tm.getPermanenceDecrement()
  print "predictedSegmentDecrement=", tm.getPredictedSegmentDecrement()
  print
def runMultiplePass(df, model, nMultiplePass, nTrain):
  """
  run CLA model through data record 0:nTrain nMultiplePass passes

  Runs the full model (SP + TM + classifier) over the first `nTrain`
  rows of `df`, repeated `nMultiplePass` times, resetting the temporal
  memory between passes.  Returns the (mutated) model.
  """
  predictedField = model.getInferenceArgs()['predictedField']
  print "run TM through the train data multiple times"
  for nPass in xrange(nMultiplePass):
    for j in xrange(nTrain):
      inputRecord = getInputRecord(df, predictedField, j)
      result = model.run(inputRecord)
      if j % 100 == 0:
        print " pass %i, record %i" % (nPass, j)
    # reset temporal memory between passes so sequences do not bridge them
    model._getTPRegion().getSelf()._tfdr.reset()
  return model
def runMultiplePassSPonly(df, model, nMultiplePass, nTrain):
  """
  run CLA model SP through data record 0:nTrain nMultiplePass passes

  Unlike runMultiplePass, only the sensor and spatial-pooler stages are
  computed (no TM/classifier), to pre-train the SP.  Returns the model.
  """
  predictedField = model.getInferenceArgs()['predictedField']
  print "run TM through the train data multiple times"
  for nPass in xrange(nMultiplePass):
    for j in xrange(nTrain):
      inputRecord = getInputRecord(df, predictedField, j)
      # Drive only the sensor + SP compute steps (private OPF hooks).
      model._sensorCompute(inputRecord)
      model._spCompute()
      if j % 400 == 0:
        print " pass %i, record %i" % (nPass, j)
  return model
def movingAverage(a, n):
  """Trailing moving average of sequence `a`.

  Element i is the mean of a[max(0, i - n):i + 1], i.e. the current value
  plus up to `n` preceding values (a window of at most n + 1 elements,
  matching the original semantics).

  Parameters
  ----------
  a : sequence of numbers
  n : int
    Window reach (number of preceding elements to include).

  Returns
  -------
  list of float
  """
  # The original shadowed the function's own name with a local list and
  # used the Python-2-only xrange; `range` works under both 2 and 3.
  averages = []
  for i in range(len(a)):
    start = max(0, i - n)
    window = a[start:i + 1]
    averages.append(sum(window) / float(len(window)))
  return averages
if __name__ == "__main__":
(_options, _args) = _getArgs()
dataSet = _options.dataSet
plot = _options.plot
classifierType = _options.classifier
if dataSet == "rec-center-hourly":
DATE_FORMAT = "%m/%d/%y %H:%M" # '7/2/10 0:00'
predictedField = "kw_energy_consumption"
elif dataSet == "nyc_taxi" or dataSet == "nyc_taxi_perturb" or dataSet =="nyc_taxi_perturb_baseline":
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
predictedField = "passenger_count"
else:
raise RuntimeError("un recognized dataset")
modelParams = getModelParamsFromName(dataSet)
modelParams['modelParams']['clParams']['steps'] = str(_options.stepsAhead)
modelParams['modelParams']['clParams']['regionName'] = classifierType
print "Creating model from %s..." % dataSet
# use customized CLA model
model = ModelFactory.create(modelParams)
model.enableInference({"predictedField": predictedField})
model.enableLearning()
model._spLearningEnabled = True
model._tpLearningEnabled = True
printTPRegionParams(model._getTPRegion())
inputData = "%s/%s.csv" % (DATA_DIR, dataSet.replace(" ", "_"))
sensor = model._getSensorRegion()
encoderList = sensor.getSelf().encoder.getEncoderList()
if sensor.getSelf().disabledEncoder is not None:
classifier_encoder = sensor.getSelf().disabledEncoder.getEncoderList()
classifier_encoder = classifier_encoder[0]
else:
classifier_encoder = None
_METRIC_SPECS = getMetricSpecs(predictedField, stepsAhead=_options.stepsAhead)
metric = metrics.getModule(_METRIC_SPECS[0])
metricsManager = MetricsManager(_METRIC_SPECS, model.getFieldInfo(),
model.getInferenceType())
if plot:
plotCount = 1
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
plt.title(predictedField)
plt.ylabel('Data')
plt.xlabel('Timed')
plt.tight_layout()
plt.ion()
print "Load dataset: ", dataSet
df = pd.read_csv(inputData, header=0, skiprows=[1, 2])
nMultiplePass = 5
nTrain = 5000
print " run SP through the first %i samples %i passes " %(nMultiplePass, nTrain)
model = runMultiplePassSPonly(df, model, nMultiplePass, nTrain)
model._spLearningEnabled = False
maxBucket = classifier_encoder.n - classifier_encoder.w + 1
likelihoodsVecAll = np.zeros((maxBucket, len(df)))
prediction_nstep = None
time_step = []
actual_data = []
patternNZ_track = []
predict_data = np.zeros((_options.stepsAhead, 0))
predict_data_ML = []
negLL_track = []
activeCellNum = []
predCellNum = []
predSegmentNum = []
predictedActiveColumnsNum = []
trueBucketIndex = []
sp = model._getSPRegion().getSelf()._sfdr
spActiveCellsCount = np.zeros(sp.getColumnDimensions())
output = nupic_output.NuPICFileOutput([dataSet])
for i in xrange(len(df)):
inputRecord = getInputRecord(df, predictedField, i)
tp = model._getTPRegion()
tm = tp.getSelf()._tfdr
prePredictiveCells = tm.getPredictiveCells()
prePredictiveColumn = np.array(list(prePredictiveCells)) / tm.cellsPerColumn
result = model.run(inputRecord)
trueBucketIndex.append(model._getClassifierInputRecord(inputRecord).bucketIndex)
predSegmentNum.append(len(tm.activeSegments))
sp = model._getSPRegion().getSelf()._sfdr
spOutput = model._getSPRegion().getOutputData('bottomUpOut')
spActiveCellsCount[spOutput.nonzero()[0]] += 1
activeDutyCycle = np.zeros(sp.getColumnDimensions(), dtype=np.float32)
sp.getActiveDutyCycles(activeDutyCycle)
overlapDutyCycle = np.zeros(sp.getColumnDimensions(), dtype=np.float32)
sp.getOverlapDutyCycles(overlapDutyCycle)
if i % 100 == 0 and i > 0:
plt.figure(1)
plt.clf()
plt.subplot(2, 2, 1)
plt.hist(overlapDutyCycle)
plt.xlabel('overlapDutyCycle')
plt.subplot(2, 2, 2)
plt.hist(activeDutyCycle)
plt.xlabel('activeDutyCycle-1000')
plt.subplot(2, 2, 3)
plt.hist(spActiveCellsCount)
plt.xlabel('activeDutyCycle-Total')
plt.draw()
tp = model._getTPRegion()
tm = tp.getSelf()._tfdr
tpOutput = tm.infActiveState['t']
predictiveCells = tm.getPredictiveCells()
predCellNum.append(len(predictiveCells))
predColumn = np.array(list(predictiveCells))/ tm.cellsPerColumn
patternNZ = tpOutput.reshape(-1).nonzero()[0]
activeColumn = patternNZ / tm.cellsPerColumn
activeCellNum.append(len(patternNZ))
predictedActiveColumns = np.intersect1d(prePredictiveColumn, activeColumn)
predictedActiveColumnsNum.append(len(predictedActiveColumns))
result.metrics = metricsManager.update(result)
negLL = result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='negativeLogLikelihood':steps=%d:window=1000:"
"field=%s"%(_options.stepsAhead, predictedField)]
if i % 100 == 0 and i>0:
negLL = result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='negativeLogLikelihood':steps=%d:window=1000:"
"field=%s"%(_options.stepsAhead, predictedField)]
nrmse = result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='nrmse':steps=%d:window=1000:"
"field=%s"%(_options.stepsAhead, predictedField)]
numActiveCell = np.mean(activeCellNum[-100:])
numPredictiveCells = np.mean(predCellNum[-100:])
numCorrectPredicted = np.mean(predictedActiveColumnsNum[-100:])
print "After %i records, %d-step negLL=%f nrmse=%f ActiveCell %f PredCol %f CorrectPredCol %f" % \
(i, _options.stepsAhead, negLL, nrmse, numActiveCell,
numPredictiveCells, numCorrectPredicted)
last_prediction = prediction_nstep
prediction_nstep = \
result.inferences["multiStepBestPredictions"][_options.stepsAhead]
output.write([i], [inputRecord[predictedField]], [float(prediction_nstep)])
bucketLL = \
result.inferences['multiStepBucketLikelihoods'][_options.stepsAhead]
likelihoodsVec = np.zeros((maxBucket,))
if bucketLL is not None:
for (k, v) in bucketLL.items():
likelihoodsVec[k] = v
time_step.append(i)
actual_data.append(inputRecord[predictedField])
predict_data_ML.append(
result.inferences['multiStepBestPredictions'][_options.stepsAhead])
negLL_track.append(negLL)
likelihoodsVecAll[0:len(likelihoodsVec), i] = likelihoodsVec
if plot and i > 500:
# prepare data for display
if i > 100:
time_step_display = time_step[-500:-_options.stepsAhead]
actual_data_display = actual_data[-500+_options.stepsAhead:]
predict_data_ML_display = predict_data_ML[-500:-_options.stepsAhead]
likelihood_display = likelihoodsVecAll[:, i-499:i-_options.stepsAhead+1]
xl = [(i)-500, (i)]
else:
time_step_display = time_step
actual_data_display = actual_data
predict_data_ML_display = predict_data_ML
likelihood_display = likelihoodsVecAll[:, :i+1]
xl = [0, (i)]
plt.figure(2)
plt.clf()
plt.imshow(likelihood_display,
extent=(time_step_display[0], time_step_display[-1], 0, 40000),
interpolation='nearest', aspect='auto',
origin='lower', cmap='Reds')
plt.colorbar()
plt.plot(time_step_display, actual_data_display, 'k', label='Data')
plt.plot(time_step_display, predict_data_ML_display, 'b', label='Best Prediction')
plt.xlim(xl)
plt.xlabel('Time')
plt.ylabel('Prediction')
# plt.title('TM, useTimeOfDay='+str(True)+' '+dataSet+' test neg LL = '+str(np.nanmean(negLL)))
plt.xlim([17020, 17300])
plt.ylim([0, 30000])
plt.clim([0, 1])
plt.draw()
predData_TM_n_step = np.roll(np.array(predict_data_ML), _options.stepsAhead)
nTest = len(actual_data) - nTrain - _options.stepsAhead
NRMSE_TM = NRMSE(actual_data[nTrain:nTrain+nTest], predData_TM_n_step[nTrain:nTrain+nTest])
print "NRMSE on test data: ", NRMSE_TM
output.close()
# calculate neg-likelihood
predictions = np.transpose(likelihoodsVecAll)
truth = np.roll(actual_data, -5)
from nupic.encoders.scalar import ScalarEncoder as NupicScalarEncoder
encoder = NupicScalarEncoder(w=1, minval=0, maxval=40000, n=22, forced=True)
from plot import computeLikelihood, plotAccuracy
bucketIndex2 = []
negLL = []
minProb = 0.0001
for i in xrange(len(truth)):
bucketIndex2.append(np.where(encoder.encode(truth[i]))[0])
outOfBucketProb = 1 - sum(predictions[i,:])
prob = predictions[i, bucketIndex2[i]]
if prob == 0:
prob = outOfBucketProb
if prob < minProb:
prob = minProb
negLL.append( -np.log(prob))
negLL = computeLikelihood(predictions, truth, encoder)
negLL[:5000] = np.nan
x = range(len(negLL))
plt.figure()
plotAccuracy((negLL, x), truth, window=480, errorType='negLL')
np.save('./result/'+dataSet+classifierType+'TMprediction.npy', predictions)
np.save('./result/'+dataSet+classifierType+'TMtruth.npy', truth)
plt.figure()
activeCellNumAvg = movingAverage(activeCellNum, 100)
plt.plot(np.array(activeCellNumAvg)/tm.numberOfCells())
plt.xlabel('data records')
plt.ylabel('sparsity')
plt.xlim([0, 5000])
plt.savefig('result/sparsity_over_training.pdf')
plt.figure()
predCellNumAvg = movingAverage(predCellNum, 100)
predSegmentNumAvg = movingAverage(predSegmentNum, 100)
# plt.plot(np.array(predCellNumAvg))
plt.plot(np.array(predSegmentNumAvg),'r', label='NMDA spike')
plt.plot(activeCellNumAvg,'b', label='spikes')
plt.xlabel('data records')
plt.ylabel('NMDA spike #')
plt.legend()
plt.xlim([0, 5000])
plt.ylim([0, 42])
plt.savefig('result/nmda_spike_over_training.pdf')
|
agpl-3.0
|
bzero/statsmodels
|
statsmodels/sandbox/nonparametric/tests/ex_gam_am_new.py
|
34
|
2606
|
# -*- coding: utf-8 -*-
"""Example for gam.AdditiveModel and PolynomialSmoother
This example was written as a test case.
The data generating process is chosen so the parameters are well identified
and estimated.
Created on Fri Nov 04 13:45:43 2011
Author: Josef Perktold
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, zip
import time
import numpy as np
#import matplotlib.pyplot as plt
from numpy.testing import assert_almost_equal
from scipy import stats
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.regression.linear_model import OLS, WLS
np.random.seed(8765993)
#seed is chosen for nice result, not randomly
#other seeds are pretty off in the prediction
#DGP: simple polynomial
order = 3
sigma_noise = 0.5
nobs = 1000 #1000 #with 1000, OLS and Additivemodel aggree in params at 2 decimals
lb, ub = -3.5, 4#2.5
x1 = np.linspace(lb, ub, nobs)
x2 = np.sin(2*x1)
# Two regressors: a rescaled linear trend and a sine of it.
x = np.column_stack((x1/x1.max()*2, x2))
# Polynomial design matrix: powers 0..order of each column, side by side.
exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)
idx = lrange((order+1)*2)
del idx[order+1]
exog_reduced = exog[:,idx] #remove duplicate constant
y_true = exog.sum(1) / 2.
z = y_true #alias check
d = x
y = y_true + sigma_noise * np.random.randn(nobs)
example = 1
if example == 1:
    # Fit the additive model and compare against plain OLS on the
    # reduced polynomial design.
    m = AdditiveModel(d)
    m.fit(y)
    y_pred = m.results.predict(d)
    for ss in m.smoothers:
        print(ss.params)
    res_ols = OLS(y, exog_reduced).fit()
    print(res_ols.params)
    #assert_almost_equal(y_pred, res_ols.fittedvalues, 3)
if example > 0:
    import matplotlib.pyplot as plt
    plt.figure()
    plt.plot(exog)
    y_pred = m.results.mu# + m.results.alpha #m.results.predict(d)
    plt.figure()
    plt.subplot(2,2,1)
    plt.plot(y, '.', alpha=0.25)
    plt.plot(y_true, 'k-', label='true')
    # NOTE(review): alpha=-.7 is not a valid alpha value (must be in
    # [0, 1]) — presumably 0.7 was intended; confirm.
    plt.plot(res_ols.fittedvalues, 'g-', label='OLS', lw=2, alpha=-.7)
    plt.plot(y_pred, 'r-', label='AM')
    plt.legend(loc='upper left')
    plt.title('gam.AdditiveModel')
    counter = 2
    for ii, xx in zip(['z', 'x1', 'x2'], [z, x[:,0], x[:,1]]):
        sortidx = np.argsort(xx)
        #plt.figure()
        plt.subplot(2, 2, counter)
        plt.plot(xx[sortidx], y[sortidx], '.', alpha=0.25)
        plt.plot(xx[sortidx], y_true[sortidx], 'k.', label='true', lw=2)
        plt.plot(xx[sortidx], y_pred[sortidx], 'r.', label='AM')
        plt.legend(loc='upper left')
        plt.title('gam.AdditiveModel ' + ii)
        counter += 1
    plt.show()
|
bsd-3-clause
|
toobaz/pandas
|
pandas/tests/io/test_date_converters.py
|
2
|
1204
|
from datetime import datetime
import numpy as np
import pandas.util.testing as tm
import pandas.io.date_converters as conv
def test_parse_date_time():
    # Separate date and time string arrays combine into full datetimes.
    date_strings = np.array(["2007/1/3", "2008/2/4"], dtype=object)
    time_strings = np.array(["05:07:09", "06:08:00"], dtype=object)
    result = conv.parse_date_time(date_strings, time_strings)
    expected = np.array(
        [datetime(2007, 1, 3, 5, 7, 9), datetime(2008, 2, 4, 6, 8, 0)]
    )
    tm.assert_numpy_array_equal(result, expected)
def test_parse_date_fields():
    # Year/month/day integer arrays combine into date-only datetimes.
    years = np.array([2007, 2008])
    months = np.array([1, 2])
    days = np.array([3, 4])
    expected = np.array([datetime(2007, 1, 3), datetime(2008, 2, 4)])
    result = conv.parse_date_fields(years, months, days)
    tm.assert_numpy_array_equal(result, expected)
def test_parse_all_fields():
    # All six component arrays combine into full datetimes.
    years = np.array([2007, 2008])
    months = np.array([1, 2])
    days = np.array([3, 4])
    hours = np.array([5, 6])
    minutes = np.array([7, 8])
    seconds = np.array([9, 0])
    result = conv.parse_all_fields(years, months, days, hours, minutes, seconds)
    expected = np.array(
        [datetime(2007, 1, 3, 5, 7, 9), datetime(2008, 2, 4, 6, 8, 0)]
    )
    tm.assert_numpy_array_equal(result, expected)
|
bsd-3-clause
|
ttpro1995/CV_Assignment01
|
main.py
|
1
|
4154
|
# Thai Thien
# 1351040
import cv2
import util
from matplotlib import pyplot as plt
import numpy as np
global cur_img # global value cur_img
global grayscale
cur_img = None
cat = None
def nothing(a):
    """Placeholder trackbar callback: just echo the received value."""
    print(a)
def main():
    """Interactive OpenCV image viewer driven by single-key commands.

    Loads ``cat.jpg`` from the working directory, shows it in a window named
    'image', then loops forever reading keystrokes and updating the display.
    The key bindings are listed in the `help` string below.  Exits via
    ``quit()`` when 'q' is pressed.
    """
    global cur_img
    global cat
    global grayscale
    status = 0  # cycles 0 -> 1 -> 2 in the 'c' branch to step through colour planes
    # NOTE(review): `help` shadows the builtin of the same name inside main().
    help = '''
    i - Show original image
    w - Save file as img.png into current directory
    s - Smooth image. Drag the top bar to change the amount
    S - A better way to smooth image. Drag the top bar to change the amount
    G or g - turn image into grayscale.
    c - display image in green, red, blue
    x - Sobel filter in x direction
    y - Sobel filter in y direction
    M or m - display magnitude of gradient.
    r - rotate mode. Drag the track bar to rotate the image.
    q - quit
    '''
    # load image
    cat = cv2.imread('cat.jpg')
    grayscale = cv2.cvtColor(cat,cv2.COLOR_BGR2GRAY)
    cur_img = cat.copy()
    cv2.imshow('image',cur_img)
    while(1):
        cv2.namedWindow('image')
        key = cv2.waitKey()  # block until a key is pressed
        if key == ord('i'):
            # restore the original image
            cur_img = cat.copy()
            print 'i'
        elif key == ord('g'): # OpenCV grayscale conversion
            cur_img = cv2.cvtColor(cat,cv2.COLOR_BGR2GRAY)
        elif key == ord('G'):
            # hand-rolled grayscale conversion from util
            cur_img = util.bgr2gray(cat)
        elif key == ord('s'):
            # Gaussian smoothing; the trackbar value is passed as sigma
            def callback(value):
                # use global variable because we can only pass in one parameter
                global cur_img
                global cat
                cur_img = cv2.GaussianBlur(cat, (3, 3), value)
                cv2.imshow('image', cur_img)
                if (value==0):
                    cv2.imshow('image',cat) # display original image when value = 0
            cv2.createTrackbar('Smooth',"image",0,255, callback)
        elif key == ord('S'):
            # custom smoothing implemented in util.smooth
            def callback(value):
                # use global variable because we can only pass in one parameter
                global cur_img
                global cat
                cur_img = util.smooth(value,10,grayscale)
                cv2.imshow('image', cur_img)
                if (value==0):
                    cv2.imshow('image',cat) # display original image when value = 0
            cv2.createTrackbar('Smooth',"image",0,100, callback)
        elif key == ord('x'):
            cur_img = util.derivative(grayscale,'x', True)
        elif key == ord('y'):
            cur_img = util.derivative(grayscale,'y', True)
        elif key == ord('m'):
            cur_img = util.magnitude(grayscale,0)
        elif key == ord('M'):
            cur_img = util.magnitude(grayscale,1)
        elif key == ord('r'):
            # rotation driven by a 0-360 degree trackbar
            def callback(value):
                # use global variable because we can only pass in one parameter
                global grayscale
                global cur_img
                cur_img = util.nohole_rotation(grayscale,value)
                cv2.imshow('image', cur_img)
            cv2.createTrackbar('Rotation',"image",0,360, callback)
        elif key == ord('c'): # cycle through the three BGR colour planes
            c1 = cat[:,:,0]
            c2 = cat[:,:,1]
            c3 = cat[:,:,2]
            if status == 0:
                channel = c1
                status = 1
            elif status == 1:
                channel = c2
                status = 2
            elif status == 2:
                channel = c3
                status = 0
            # NOTE(review): the selected channel is written into plane `status`,
            # which was just advanced above, so the destination plane index and
            # the source channel index differ by one — confirm this is intended.
            cur_img = np.zeros((cat.shape[0], cat.shape[1], 3),
                               dtype = cat.dtype)
            cur_img[:,:,status] = channel
        elif key == ord('w'):
            cv2.imwrite('img.png',cur_img)
            print 'w'
        elif key == ord('p'):
            # gradient-vector plot; trackbar sets the sampling stride
            def callback(value):
                # use global variable because we can only pass in one parameter
                global grayscale
                global cur_img
                cur_img = util.plotGradVec(grayscale,n = value)
                cv2.imshow('image', cur_img)
            cv2.createTrackbar('Plot',"image",1,20, callback)
        elif key == ord('h'):
            print help
        elif key == ord('q'):
            print 'q'
            quit()
        cv2.imshow('image', cur_img)
# Script entry point: launch the interactive viewer when run directly.
if __name__ == '__main__':
    main()
|
mit
|
CallaJun/hackprince
|
indico/matplotlib/sphinxext/plot_directive.py
|
11
|
26894
|
"""
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If the ``:context: reset`` is specified,
the context is reset for this and future plots.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which ``plot::`` file names are relative
to. (If None or empty, file names are relative to the
directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
for x in sphinx_version[:2]])
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
import matplotlib
import matplotlib.cbook as cbook
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    # Function-style directive entry point (registered via app.add_directive
    # in setup()); all the work happens in run().
    return run(arguments, content, options, state_machine, state, lineno)
# Reuse the module docstring as the directive's documentation.
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset']:
return arg
else:
raise ValueError("argument should be None or 'reset'")
return directives.choice(arg, ('None', 'reset'))
def _option_format(arg):
    # Validate the :format: option: must be 'python' or 'doctest'.
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
    # Validate the :align: option against the values the image directive accepts.
    return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
                                   "right"))
def mark_plot_labels(app, document):
    """
    To make plots referenceable, we need to move the reference from
    the "htmlonly" (or "latexonly") node to the actual figure node
    itself.

    Connected to Sphinx's 'doctree-read' event in setup().
    """
    # Walk every explicitly named node in the document.
    for name, explicit in six.iteritems(document.nametypes):
        if not explicit:
            continue
        labelid = document.nameids[name]
        if labelid is None:
            continue
        node = document.ids[labelid]
        if node.tagname in ('html_only', 'latex_only'):
            # Move the label from the wrapper node onto the first figure
            # inside it, using the figure caption (if any) as section text.
            for n in node:
                if n.tagname == 'figure':
                    sectname = name
                    for c in n:
                        if c.tagname == 'caption':
                            sectname = c.astext()
                            break
                    node['ids'].remove(labelid)
                    node['names'].remove(name)
                    n['ids'].append(labelid)
                    n['names'].append(name)
                    document.settings.env.labels[name] = \
                        document.settings.env.docname, labelid, sectname
                    break
def setup(app):
    """Sphinx extension entry point: register the ``plot`` directive, its
    configuration values, and the doctree-read hook.

    The Sphinx app/config/confdir are stashed as attributes on this function
    object so module-level helpers (run_code, render_figures, run) can reach
    them without a global.
    """
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir
    # Option spec: image-directive options plus plot-specific ones.
    options = {'alt': directives.unchanged,
               'height': directives.length_or_unitless,
               'width': directives.length_or_percentage_or_unitless,
               'scale': directives.nonnegative_int,
               'align': _option_align,
               'class': directives.class_option,
               'include-source': _option_boolean,
               'format': _option_format,
               'context': _option_context,
               'nofigs': directives.flag,
               'encoding': directives.encoding
               }
    app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
    app.add_config_value('plot_pre_code', None, True)
    app.add_config_value('plot_include_source', False, True)
    app.add_config_value('plot_html_show_source_link', True, True)
    app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
    app.add_config_value('plot_basedir', None, True)
    app.add_config_value('plot_html_show_formats', True, True)
    app.add_config_value('plot_rcparams', {}, True)
    app.add_config_value('plot_apply_rcparams', False, True)
    app.add_config_value('plot_working_directory', None, True)
    app.add_config_value('plot_template', None, True)
    app.connect(str('doctree-read'), mark_plot_labels)
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
    """Return True when *text* is doctest-style input rather than plain code.

    Anything that compiles as ordinary Python is not doctest; otherwise we
    look for a ``>>>`` prompt at the start of a line.
    """
    try:
        # check if it's valid Python as-is
        compile(text, '<string>', 'exec')
    except SyntaxError:
        pass
    else:
        return False
    return re.search(r'^\s*>>>', text, re.M) is not None
def unescape_doctest(text):
    """
    Extract code from a piece of text, which contains either Python code
    or doctests.

    Doctest prompt lines keep their payload, prose lines become comments,
    and blank lines are preserved; plain code is returned untouched.
    """
    if not contains_doctest(text):
        return text
    prompt_re = re.compile(r'^\s*(>>>|\.\.\.) (.*)$')
    pieces = []
    for raw_line in text.split("\n"):
        match = prompt_re.match(raw_line)
        if match:
            pieces.append(match.group(2))
        elif raw_line.strip():
            pieces.append("# " + raw_line.strip())
        else:
            pieces.append("")
    # Original implementation terminated every line (including the last)
    # with a newline, so append one after the join.
    return "\n".join(pieces) + "\n"
def split_code_at_show(text):
    """
    Split code at plt.show()

    Each returned chunk ends with (and includes) a show() line; a trailing
    chunk with no show() is kept only if it contains non-whitespace.
    """
    doctest_mode = contains_doctest(text)
    marker = '>>> plt.show()' if doctest_mode else 'plt.show()'
    blocks = []
    current = []
    for line in text.split("\n"):
        current.append(line)
        if line.strip() == marker:
            blocks.append("\n".join(current))
            current = []
    if "\n".join(current).strip():
        blocks.append("\n".join(current))
    return blocks
def remove_coding(text):
    """
    Remove the coding comment, which six.exec_ doesn't like.

    Blanks out any PEP 263 ``-*- coding: ... -*-`` comment line in *text*.
    """
    # BUG FIX: the pattern was a non-raw string containing \s and \* escapes,
    # which is a DeprecationWarning (and eventually an error) in modern
    # Python; use a raw string for the regex.
    return re.sub(
        r"^#\s*-\*-\s*coding:\s*.*-\*-$", "", text, flags=re.MULTILINE)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
    """Record of one generated plot image and the formats written for it."""

    def __init__(self, basename, dirname):
        self.basename = basename   # file name stem, without extension
        self.dirname = dirname     # directory the image files live in
        self.formats = []          # extensions written so far, e.g. ['png', 'pdf']

    def filename(self, format):
        """Full path of this image rendered in *format*."""
        return os.path.join(self.dirname,
                            "{0}.{1}".format(self.basename, format))

    def filenames(self):
        """Full paths for every format recorded in ``self.formats``."""
        return [self.filename(ext) for ext in self.formats]
def out_of_date(original, derived):
    """
    Returns True if derivative is out-of-date wrt original,
    both of which are full file paths.

    A missing *derived* is always out of date; a missing *original* (with an
    existing *derived*) is never out of date.
    """
    if not os.path.exists(derived):
        return True
    if not os.path.exists(original):
        return False
    return os.stat(derived).st_mtime < os.stat(original).st_mtime
class PlotError(RuntimeError):
    """Raised when example code fails to execute or a figure fails to save."""
    pass
def run_code(code, code_path, ns=None, function_name=None):
    """
    Import a Python module from a path, and run the function given by
    name, if function_name is not None.

    *code* is the source text to execute, *ns* the namespace dict to execute
    it in (reused across calls when the :context: option is active).  The
    working directory, sys.path, sys.argv and sys.stdout are all temporarily
    modified and restored in the ``finally`` block.  Returns *ns*.
    Raises PlotError if the executed code raises anything.
    """
    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any. Add its path to sys.path
    # so it can import any helper modules sitting beside it.
    if six.PY2:
        pwd = os.getcwdu()
    else:
        pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if setup.config.plot_working_directory is not None:
        try:
            os.chdir(setup.config.plot_working_directory)
        except OSError as err:
            raise OSError(str(err) + '\n`plot_working_directory` option in'
                          'Sphinx configuration file must be a valid '
                          'directory path')
        except TypeError as err:
            raise TypeError(str(err) + '\n`plot_working_directory` option in '
                            'Sphinx configuration file must be a string or '
                            'None')
        sys.path.insert(0, setup.config.plot_working_directory)
    elif code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)
    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]
    # Redirect stdout so the example's prints don't pollute the build log
    stdout = sys.stdout
    if six.PY3:
        sys.stdout = io.StringIO()
    else:
        sys.stdout = cStringIO.StringIO()
    # Assign a do-nothing print function to the namespace. There
    # doesn't seem to be any other way to provide a way to (not) print
    # that works correctly across Python 2 and 3.
    def _dummy_print(*arg, **kwarg):
        pass
    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            # Only seed the namespace on its first use (it persists when the
            # :context: option is active).
            if not ns:
                if setup.config.plot_pre_code is None:
                    six.exec_(six.text_type("import numpy as np\n" +
                    "from matplotlib import pyplot as plt\n"), ns)
                else:
                    six.exec_(six.text_type(setup.config.plot_pre_code), ns)
            ns['print'] = _dummy_print
            if "__main__" in code:
                six.exec_("__name__ = '__main__'", ns)
            code = remove_coding(code)
            six.exec_(code, ns)
            if function_name is not None:
                six.exec_(function_name + "()", ns)
        except (Exception, SystemExit) as err:
            # Wrap any failure (including SystemExit) with the full traceback
            raise PlotError(traceback.format_exc())
    finally:
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
def clear_state(plot_rcparams, close=True):
    # Reset matplotlib between plot directives: optionally close every open
    # figure, restore rc defaults, then apply the user-configured rcParams.
    if close:
        plt.close('all')
    matplotlib.rc_file_defaults()
    matplotlib.rcParams.update(plot_rcparams)
def render_figures(code, code_path, output_dir, output_base, context,
                   function_name, config, context_reset=False):
    """
    Run a pyplot script and save the low and high res PNGs and a PDF
    in *output_dir*.
    Save the images under *output_dir* with file names derived from
    *output_base*

    Returns a list of ``(code_piece, [ImageFile, ...])`` pairs, one per
    plt.show()-delimited chunk of *code*.  Existing, up-to-date image files
    are reused without re-running the code.  Raises PlotError on failure.
    """
    # -- Parse format list
    default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
    formats = []
    plot_formats = config.plot_formats
    if isinstance(plot_formats, six.string_types):
        # SECURITY NOTE(review): a string plot_formats config value is passed
        # through eval(); conf.py is already trusted code, but ast.literal_eval
        # would be the safer choice here.
        plot_formats = eval(plot_formats)
    for fmt in plot_formats:
        if isinstance(fmt, six.string_types):
            formats.append((fmt, default_dpi.get(fmt, 80)))
        elif type(fmt) in (tuple, list) and len(fmt)==2:
            formats.append((str(fmt[0]), int(fmt[1])))
        else:
            raise PlotError('invalid image format "%r" in plot_formats' % fmt)
    # -- Try to determine if all images already exist
    code_pieces = split_code_at_show(code)
    # Look for single-figure output files first
    all_exists = True
    img = ImageFile(output_base, output_dir)
    for format, dpi in formats:
        if out_of_date(code_path, img.filename(format)):
            all_exists = False
            break
        img.formats.append(format)
    if all_exists:
        return [(code, [img])]
    # Then look for multi-figure output files
    results = []
    all_exists = True
    for i, code_piece in enumerate(code_pieces):
        images = []
        # Probe numbered output files until one is missing (1000 is just an
        # upper bound on figures per code piece).
        for j in xrange(1000):
            if len(code_pieces) > 1:
                img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
            else:
                img = ImageFile('%s_%02d' % (output_base, j), output_dir)
            for format, dpi in formats:
                if out_of_date(code_path, img.filename(format)):
                    all_exists = False
                    break
                img.formats.append(format)
            # assume that if we have one, we have them all
            if not all_exists:
                all_exists = (j > 0)
                break
            images.append(img)
        if not all_exists:
            break
        results.append((code_piece, images))
    if all_exists:
        return results
    # We didn't find the files, so build them
    results = []
    if context:
        # shared module-level namespace so later directives see earlier state
        ns = plot_context
    else:
        ns = {}
    if context_reset:
        clear_state(config.plot_rcparams)
    for i, code_piece in enumerate(code_pieces):
        if not context or config.plot_apply_rcparams:
            clear_state(config.plot_rcparams, close=not context)
        run_code(code_piece, code_path, ns, function_name)
        images = []
        fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
        for j, figman in enumerate(fig_managers):
            # Name the file by (piece, figure) indices, dropping whichever
            # index is unambiguous.
            if len(fig_managers) == 1 and len(code_pieces) == 1:
                img = ImageFile(output_base, output_dir)
            elif len(code_pieces) == 1:
                img = ImageFile("%s_%02d" % (output_base, j), output_dir)
            else:
                img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
                                output_dir)
            images.append(img)
            for format, dpi in formats:
                try:
                    figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
                except Exception as err:
                    raise PlotError(traceback.format_exc())
                img.formats.append(format)
        results.append((code_piece, images))
        if not context or config.plot_apply_rcparams:
            clear_state(config.plot_rcparams, close=not context)
    return results
def run(arguments, content, options, state_machine, state, lineno):
    """Execute one ``plot`` directive: resolve the source, render the
    figures, and insert the generated reStructuredText into the document.

    Returns a list of docutils system messages (empty on success).
    """
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")
    document = state_machine.document
    config = document.settings.env.config
    nofigs = 'nofigs' in options
    options.setdefault('include-source', config.plot_include_source)
    context = 'context' in options
    context_reset = True if (context and options['context'] == 'reset') else False
    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)
    if len(arguments):
        # Case 1: the directive argument names a source file.
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))
        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)
        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None
        with io.open(source_file_name, 'r', encoding='utf-8') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        # Case 2: inline content; name outputs after the rst file plus a
        # per-document counter so repeated directives don't collide.
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''
    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''
    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')
    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True
    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]
    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)
    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir) # no problem here for me, but just use built-ins
    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext
    # make figures
    try:
        results = render_figures(code, source_file_name, build_dir, output_base,
                                 context, function_name, config,
                                 context_reset=context_reset)
        errors = []
    except PlotError as err:
        # Report the failure as a docutils warning and still emit the source
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                                source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]
    # Properly indent the caption
    caption = '\n'.join('      ' + line.strip()
                        for line in caption.split('\n'))
    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""
        if nofigs:
            images = []
        opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"
        # Not-None src_link signals the need for a source link in the generated
        # html
        if j == 0 and config.plot_html_show_source_link:
            src_link = source_link
        else:
            src_link = None
        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats and not nofigs,
            caption=caption)
        total_lines.extend(result.split("\n"))
        # extending with a 1-char string appends a single "\n" element,
        # i.e. one blank separator line between pieces
        total_lines.extend("\n")
    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)
    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)
    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)
    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with io.open(target_name, 'w', encoding="utf-8") as f:
        if source_file_name == rst_file:
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)
    return errors
|
lgpl-3.0
|
UT-CWE/Hyospy
|
Hyospy_ensemble/lib/SUNTANS/DataIO/romsio_old.py
|
1
|
61855
|
"""
Tools for dealing with ROMS model output
See Octant project as well
Created on Fri Mar 08 15:09:46 2013
@author: mrayson
"""
import numpy as np
from netCDF4 import Dataset, MFDataset, num2date
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from scipy import interpolate
# Private modules
from interpXYZ import interpXYZ
import othertime
from timeseries import timeseries
import operator
from maptools import ll2lcc
from mygeometry import MyLine
import pdb
try:
from octant.slice import isoslice
except:
print 'Warning - could not import octant package.'
import pdb
class roms_grid(object):
    """
    Class for ROMS grid

    Loads the horizontal grid variables from a ROMS grid/history netCDF file
    and provides helpers for writing subsetted grids and locating grid cells.
    """
    def __init__(self,ncfile):
        # Path (or list/glob, handled via MFDataset) of the grid netCDF file
        self.grdfile = ncfile
        self.readGrid()
    def readGrid(self):
        """
        Read in the main grid variables from the grid netcdf file
        """
        # Try a multi-file open first; fall back to a single-file Dataset.
        # NOTE(review): bare except also hides real I/O errors from MFDataset.
        try:
            nc = MFDataset(self.grdfile, 'r')
        except:
            nc = Dataset(self.grdfile, 'r')
        varnames = ['angle','lon_rho','lat_rho','lon_psi','lat_psi','lon_u','lat_u',\
            'lon_v','lat_v','h','f','mask_rho','mask_psi','mask_u','mask_v','pm','pn']
        for vv in varnames:
            # Missing variables are skipped with a warning rather than failing
            try:
                setattr(self,vv,nc.variables[vv][:])
            except:
                print 'Cannot find variable: %s'%vv
        nc.close()
    def Writefile(self,outfile,verbose=True):
        """
        Writes subsetted grid and coordinate variables to a netcdf file
        Code modified from roms.py in the Octant package

        NOTE(review): uses vertical-coordinate attributes (s_rho, s_w, Cs_r,
        Cs_w, hc, Vstretching, Vtransform) that readGrid() does not load;
        presumably a subclass attaches them first — confirm before calling.
        Also note the file handle is only sync'd here, not closed.
        """
        self.outfile = outfile
        Mp, Lp = self.lon_rho.shape
        M, L = self.lon_psi.shape
        N = self.s_rho.shape[0] # vertical layers
        # Domain extents over wet (mask==1) cells only
        xl = self.lon_rho[self.mask_rho==1.0].ptp()
        el = self.lat_rho[self.mask_rho==1.0].ptp()
        # Write ROMS grid to file
        nc = Dataset(outfile, 'w', format='NETCDF3_CLASSIC')
        nc.Description = 'ROMS subsetted history file'
        nc.Author = ''
        nc.Created = datetime.now().isoformat()
        nc.type = 'ROMS HIS file'
        nc.createDimension('xi_rho', Lp)
        nc.createDimension('xi_u', L)
        nc.createDimension('xi_v', Lp)
        nc.createDimension('xi_psi', L)
        nc.createDimension('eta_rho', Mp)
        nc.createDimension('eta_u', Mp)
        nc.createDimension('eta_v', M)
        nc.createDimension('eta_psi', M)
        nc.createDimension('s_rho', N)
        nc.createDimension('s_w', N+1)
        nc.createDimension('ocean_time', None)
        nc.createVariable('xl', 'f8', ())
        nc.variables['xl'].units = 'meters'
        nc.variables['xl'] = xl
        nc.createVariable('el', 'f8', ())
        nc.variables['el'].units = 'meters'
        nc.variables['el'] = el
        nc.createVariable('spherical', 'S1', ())
        nc.variables['spherical'] = 'F'
        def write_nc_var(var, name, dimensions, units=None):
            # Helper: create a float64 variable, set units, write the data.
            nc.createVariable(name, 'f8', dimensions)
            if units is not None:
                nc.variables[name].units = units
            nc.variables[name][:] = var
            if verbose:
                print ' ... wrote ', name
        # Grid variables
        write_nc_var(self.angle, 'angle', ('eta_rho', 'xi_rho'))
        write_nc_var(self.h, 'h', ('eta_rho', 'xi_rho'), 'meters')
        write_nc_var(self.f, 'f', ('eta_rho', 'xi_rho'), 'seconds-1')
        write_nc_var(self.mask_rho, 'mask_rho', ('eta_rho', 'xi_rho'))
        write_nc_var(self.mask_u, 'mask_u', ('eta_u', 'xi_u'))
        write_nc_var(self.mask_v, 'mask_v', ('eta_v', 'xi_v'))
        write_nc_var(self.mask_psi, 'mask_psi', ('eta_psi', 'xi_psi'))
        write_nc_var(self.lon_rho, 'lon_rho', ('eta_rho', 'xi_rho'), 'degrees')
        write_nc_var(self.lat_rho, 'lat_rho', ('eta_rho', 'xi_rho'), 'degrees')
        write_nc_var(self.lon_u, 'lon_u', ('eta_u', 'xi_u'), 'degrees')
        write_nc_var(self.lat_u, 'lat_u', ('eta_u', 'xi_u'), 'degrees')
        write_nc_var(self.lon_v, 'lon_v', ('eta_v', 'xi_v'), 'degrees')
        write_nc_var(self.lat_v, 'lat_v', ('eta_v', 'xi_v'), 'degrees')
        write_nc_var(self.lon_psi, 'lon_psi', ('eta_psi', 'xi_psi'), 'degrees')
        write_nc_var(self.lat_psi, 'lat_psi', ('eta_psi', 'xi_psi'), 'degrees')
        write_nc_var(self.pm, 'pm', ('eta_rho', 'xi_rho'), 'degrees')
        write_nc_var(self.pn, 'pn', ('eta_rho', 'xi_rho'), 'degrees')
        # Vertical coordinate variables
        write_nc_var(self.s_rho, 's_rho', ('s_rho',))
        write_nc_var(self.s_w, 's_w', ('s_w',))
        write_nc_var(self.Cs_r, 'Cs_r', ('s_rho',))
        write_nc_var(self.Cs_w, 'Cs_w', ('s_w',))
        write_nc_var(self.hc, 'hc', ())
        write_nc_var(self.Vstretching, 'Vstretching', ())
        write_nc_var(self.Vtransform, 'Vtransform', ())
        nc.sync()
    def nc_add_dimension(self,outfile,name,length):
        """
        Add a dimension to an existing netcdf file
        """
        nc = Dataset(outfile, 'a')
        nc.createDimension(name, length)
        nc.close()
    def nc_add_var(self,outfile,data,name,dimensions,units=None,long_name=None,coordinates=None):
        """
        Add a new variable and write the data
        """
        nc = Dataset(outfile, 'a')
        nc.createVariable(name, 'f8', dimensions)
        if units is not None:
            nc.variables[name].units = units
        if coordinates is not None:
            nc.variables[name].coordinates = coordinates
        if long_name is not None:
            nc.variables[name].long_name = long_name
        nc.variables[name][:] = data.copy()
        nc.sync()
        nc.close()
    def nc_add_varnodata(self,outfile,name,dimensions,units=None,long_name=None,coordinates=None):
        """
        Add a new variable and doesn't write the data
        """
        nc = Dataset(outfile, 'a')
        nc.createVariable(name, 'f8', dimensions)
        if units is not None:
            nc.variables[name].units = units
        if coordinates is not None:
            nc.variables[name].coordinates = coordinates
        if long_name is not None:
            nc.variables[name].long_name = long_name
        nc.close()
    def findNearset(self,x,y,grid='rho'):
        """
        Return the J,I indices of the nearest grid cell to x,y

        Nearest is by Euclidean distance in lon/lat space (no great-circle
        correction).
        """
        if grid == 'rho':
            lon = self.lon_rho
            lat = self.lat_rho
        elif grid == 'u':
            lon = self.lon_u
            lat = self.lat_u
        elif grid =='v':
            lon = self.lon_v
            lat = self.lat_v
        elif grid =='psi':
            lon = self.lon_psi
            lat = self.lat_psi
        dist = np.sqrt( (lon - x)**2 + (lat - y)**2)
        return np.argwhere(dist==dist.min())
    def utmconversion(self,lon,lat,utmzone,isnorth):
        """
        Convert the ROMS grid to utm coordinates
        """
        # maptools is a project-local module (not stdlib)
        from maptools import ll2utm
        M,N = lon.shape
        xy = ll2utm(np.hstack((np.reshape(lon,(M*N,1)),np.reshape(lat,(M*N,1)))),utmzone,north=isnorth)
        return np.reshape(xy[:,0],(M,N)), np.reshape(xy[:,1],(M,N))
class ROMS(roms_grid):
    """
    General class for reading and plotting ROMS model output
    """
    # Default extraction settings; any of these can be overridden per-instance
    # via **kwargs passed to __init__.
    varname = 'zeta'
    JRANGE = None
    IRANGE = None
    zlayer = False # True load z layer, False load sigma layer
    K = [0] # Layer to extract, 0 bed, -1 surface, -99 all
    tstep = [0] # - 1 last step, -99 all time steps
    clim = None # Plot limits
    def __init__(self,romsfile,**kwargs):
        """Open a ROMS output file and prime grid/coordinate/time metadata.

        romsfile : path (or list of paths) to ROMS netCDF output.
        **kwargs : override any class-level default (varname, tstep, K, ...).
        """
        self.__dict__.update(kwargs)
        self.romsfile = romsfile
        # Load the grid
        roms_grid.__init__(self,self.romsfile)
        # Open the netcdf object
        self._openNC()
        # Load the time information
        # NOTE(review): bare except silently tolerates a missing/odd time
        # variable but will also mask unrelated errors in _loadTime().
        try:
            self._loadTime()
        except:
            print 'No time variable.'
        # Check the spatial indices of the variable
        self._loadVarCoords()
        self.listCoordVars()
        self._checkCoords(self.varname)
        # Check the vertical coordinates
        self._readVertCoords()
        self._checkVertCoords(self.varname)
    def listCoordVars(self):
        """
        List all of the variables that have the 'coordinate' attribute

        Stores the names on self.coordvars and also returns the list.
        """
        self.coordvars=[]
        for vv in self.nc.variables.keys():
            if hasattr(self.nc.variables[vv],'coordinates'):
                #print '%s - %s'%(vv,self.nc.variables[vv].long_name)
                self.coordvars.append(vv)
        return self.coordvars
    def loadData(self,varname=None,tstep=None):
        """
        Loads model data from the netcdf file.

        varname: variable to read (defaults to self.varname).
        tstep: list of time indices (defaults to self.tstep).
        The spatial subset is controlled by self.JRANGE / self.IRANGE and,
        for 4-D variables, the layers by self.K. Returns the squeezed
        array and also stores it on self.data.
        """
        if varname == None:
            varname=self.varname
            self._checkCoords(varname)
        else:
            self._checkCoords(varname)
            if self.ndim == 4:
                self._checkVertCoords(varname)
        if tstep == None:
            tstep = self.tstep
        # Index pattern depends on the variable rank
        if self.ndim==1:
            data = self.nc.variables[varname][tstep]
        elif self.ndim == 2:
            data = self.nc.variables[varname][self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]]
        elif self.ndim == 3:
            data = self.nc.variables[varname][tstep,self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]]
        elif self.ndim == 4:
            data = self.nc.variables[varname][tstep,self.K,self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]]
        if self.ndim == 4 and self.zlayer==True:
            # Slice along z layers
            print 'Extracting data along z-coordinates...'
            dataz = np.zeros((len(tstep),)+self.Z.shape+self.X.shape)
            for ii,tt in enumerate(tstep):
                #Z = self.calcDepth(zeta=self.loadData(varname='zeta',tstep=[tt]))
                Z = self.calcDepth()[:,self.JRANGE[0]:self.JRANGE[1],\
                    self.IRANGE[0]:self.IRANGE[1]].squeeze()
                if len(Z.shape) > 1:
                    dataz[ii,:,:] = isoslice(data[ii,:,:,:].squeeze(),Z,self.Z)
                else:
                    # Isoslice won't work on 1-D arrays
                    F = interpolate.interp1d(Z,data[ii,:,:,:].squeeze(),bounds_error=False)
                    dataz[ii,:,:] = F(self.Z)[:,np.newaxis,np.newaxis]
            data = dataz
        #self._checkCoords(self.varname)
        # Reduce rank
        self.data = data.squeeze()
        return self.data
def loadTimeSeries(self,x,y,z=None,varname=None,trange=None):
"""
Load a time series at point x,y
Set z=None to load all layers, else load depth
"""
if varname == None:
self.varname = self.varname
else:
self.varname = varname
self._checkCoords(self.varname)
if self.ndim == 4:
self._checkVertCoords(self.varname)
if z == None:
self.zlayer=False
self.K = [-99]
else:
self.zlayer=True
self.K = [z]
if trange==None:
tstep=np.arange(0,self.Nt)
# Set the index range to grab
JI = self.findNearset(x,y,grid=self.gridtype)
self.JRANGE = [JI[0][0], JI[0][0]+1]
self.IRANGE = [JI[0][1], JI[0][1]+1]
if self.zlayer:
Zout = z
else:
# Return the depths at each time step
h = self.h[self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]].squeeze()
zeta=self.loadData(varname='zeta',tstep=tstep)
h = h*np.ones(zeta.shape)
Zout = get_depth(self.S,self.C,self.hc,h,zeta=zeta, Vtransform=self.Vtransform).squeeze()
return self.loadData(varname=varname,tstep=tstep), Zout
def calcDepth(self,zeta=None):
"""
Calculates the depth array for the current variable
"""
#h = self.h[self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]].squeeze()
if self.gridtype == 'rho':
h = self.h
elif self.gridtype == 'psi':
h = 0.5 * (self.h[1:,1:] + self.h[0:-1,0:-1])
elif self.gridtype == 'u':
h = 0.5 * (self.h[:,1:] + self.h[:,0:-1])
elif self.gridtype == 'v':
h = 0.5 * (self.h[1:,:] + self.h[0:-1,:])
return get_depth(self.S,self.C,self.hc,h,zeta=zeta, Vtransform=self.Vtransform).squeeze()
def depthInt(self,var,grid='rho',cumulative=False):
"""
Depth-integrate data in variable, var (array [Nz, Ny, Nx])
Set cumulative = True for cumulative integration i.e. for pressure calc.
"""
sz = var.shape
if not sz[0] == self.Nz:
raise Exception, 'length of dimension 0 must equal %d (currently %d)'%(self.Nz,sz[0])
if not len(sz)==3:
raise Exception, 'only 3-D arrays are supported.'
if grid == 'rho':
h = self.h
elif grid == 'psi':
h = 0.5 * (self.h[1:,1:] + self.h[0:-1,0:-1])
elif grid == 'u':
h = 0.5 * (self.h[:,1:] + self.h[:,0:-1])
elif grid == 'v':
h = 0.5 * (self.h[1:,:] + self.h[0:-1,:])
z_w = get_depth(self.s_w,self.Cs_w,self.hc,h,Vtransform=self.Vtransform).squeeze()
dz = np.diff(z_w,axis=0)
if cumulative:
return np.cumsum(dz*var,axis=0)
else:
return np.sum(dz*var,axis=0)
def depthAvg(self,var,grid='rho'):
"""
Depth-average data in variable, var (array [Nz, Ny, Nx])
"""
sz = var.shape
if not sz[0] == self.Nz:
raise Exception, 'length of dimension 0 must equal %d (currently %d)'%(self.Nz,sz[0])
if not len(sz)==3:
raise Exception, 'only 3-D arrays are supported.'
if grid == 'rho':
h = self.h
elif grid == 'psi':
h = 0.5 (self.h[1:,1:] + self.h[0:-1,0:-1])
elif grid == 'u':
h = 0.5 (self.h[:,1:] + self.h[:,0:-1])
elif grid == 'v':
h = 0.5 (self.h[1:,:] + self.h[0:-1,:])
z_w = get_depth(self.s_w,self.Cs_w,self.hc,h,Vtransform=self.Vtransform).squeeze()
dz = np.diff(z_w,axis=0)
return np.sum(dz*var,axis=0) / h
def areaInt(self,var,grid='rho'):
"""
Calculate the area integral of var
"""
if grid == 'rho':
dx = 1.0/self.pm
dy = 1.0/self.pn
elif grid == 'psi':
dx = 1.0/(0.5*(self.pm[1:,1:] + self.pm[0:-1,0:-1]))
dy = 1.0/(0.5*(self.pn[1:,1:] + self.pn[0:-1,0:-1]))
elif grid == 'u':
dx = 1.0/(0.5 * (self.pm[:,1:] + self.pm[:,0:-1]))
dy = 1.0/(0.5 * (self.pn[:,1:] + self.pn[:,0:-1]))
elif grid == 'v':
dx = 0.5 * (self.pm[1:,:] + self.pm[0:-1,:])
dy = 0.5 * (self.pn[1:,:] + self.pn[0:-1,:])
A = dx*dy
return np.sum(var*A)
    def gradZ(self,var,grid='rho',cumulative=False):
        """
        Depth-gradient of data in variable, var (array [Nz, Ny, Nx]).

        Centred second-order differences at interior sigma levels,
        one-sided first-order differences at the bed and surface.
        NOTE(review): the 'grid' and 'cumulative' arguments are unused.
        """
        sz = var.shape
        #print sz
        if not sz[0] == self.Nz:
            raise Exception, 'length of dimension 0 must equal %d (currently %d)'%(self.Nz,sz[0])
        h = self.h[self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]].squeeze()
        z_r = get_depth(self.s_rho,self.Cs_r,self.hc,h,Vtransform=self.Vtransform).squeeze()
        dz = np.diff(z_r,axis=0)
        dz_mid = 0.5 * (dz[1:,...] + dz[0:-1,...]) # N-2
        var_mid = 0.5 * (var[1:,...] + var[0:-1,...])
        dv_dz = np.zeros(sz)
        # 2-nd order mid-points
        dv_dz[1:-1,...] = (var_mid[1:,...] - var_mid[0:-1,...]) / dz_mid
        # 1st order end points
        dv_dz[0,...] = (var[1,...] - var[0,...]) / dz[0,...]
        dv_dz[-1,...] = (var[-1,...] - var[-2,...]) / dz[-1,...]
        return dv_dz
    def MLD(self,tstep,thresh=-0.006,z_max=-20.0):
        """
        Mixed layer depth calculation.

        thresh is the density gradient threshold.
        z_max is the min mixed layer depth: gradients at depths shallower
        than z_max (depths are negative downwards) are ignored.
        """
        # Load the density data
        self.K=[-99]
        drho_dz=self.gradZ(self.loadData(varname='rho',tstep=tstep))
        # Mask drho_dz where z >= z_max
        z = self.calcDepth()
        mask = z >= z_max
        drho_dz[mask] = 0.0
        #
        mld_ind = np.where(drho_dz <= thresh)
        # Deepest (maximum-z) level where the gradient exceeds the threshold
        zout = -99999.0*np.ones(z.shape)
        zout[mld_ind[0],mld_ind[1],mld_ind[2]] = z[mld_ind[0],mld_ind[1],mld_ind[2]]
        mld = np.max(zout,axis=0)
        # Isoslice averages when there is more than one value
        #mld = isoslice(z,drho_dz,thresh)
        # Limit the mixed layer depth to the sea bed
        mld = np.max([mld,-self.h],axis=0)
        return mld
    def MLDmask(self,mld,grid='rho'):
        """
        Compute a 3D mask for variables beneath the mixed layer.

        mld: 2-D mixed layer depth array (e.g. from MLD()).
        Returns an array shaped like the sigma-level depths with 1.0 where
        the level is at or above the mixed layer depth.
        """
        if grid == 'rho':
            h = self.h
        elif grid == 'psi':
            h = 0.5 * (self.h[1:,1:] + self.h[0:-1,0:-1])
            mld =0.5 * (mld[1:,1:] + mld[0:-1,0:-1])
        elif grid == 'u':
            h = 0.5 * (self.h[:,1:] + self.h[:,0:-1])
            mld = 0.5 * (mld[:,1:] + mld[:,0:-1])
        elif grid == 'v':
            h = 0.5 * (self.h[1:,:] + self.h[0:-1,:])
            mld = 0.5 * (mld[1:,:] + mld[0:-1,:])
        z = get_depth(self.s_rho,self.Cs_r,self.hc,h,Vtransform=self.Vtransform).squeeze()
        mask = np.zeros(z.shape)
        for jj in range(mld.shape[0]):
            for ii in range(mld.shape[1]):
                ind = z[:,jj,ii] >= mld[jj,ii]
                # NOTE(review): ind is a boolean vector, so np.size(ind)
                # always equals Nz and this test is always true;
                # np.any(ind) was probably intended -- confirm.
                if np.size(ind)>0:
                    mask[ind,jj,ii]=1.0
        return mask
def pcolor(self,data=None,titlestr=None,colorbar=True,ax=None,fig=None,**kwargs):
"""
Pcolor plot of the data in variable
"""
if data==None:
data=self.loadData()
if self.clim==None:
clim=[data.min(),data.max()]
else:
clim=self.clim
if fig==None:
fig = plt.gcf()
if ax==None:
ax = fig.gca()
p1 = ax.pcolormesh(self.X,self.Y,data,vmin=clim[0],vmax=clim[1],**kwargs)
ax.set_aspect('equal')
if colorbar:
plt.colorbar(p1)
if titlestr==None:
plt.title(self._genTitle(self.tstep[0]))
else:
plt.title(titlestr)
return p1
def contourf(self, data=None, clevs=20, titlestr=None,colorbar=True,**kwargs):
"""
contour plot of the data in variable
"""
if data==None:
data=self.loadData()
if self.clim==None:
clim=[data.min(),data.max()]
else:
clim=self.clim
fig = plt.gcf()
ax = fig.gca()
p1 = plt.contourf(self.X,self.Y,data,clevs,vmin=clim[0],vmax=clim[1],**kwargs)
ax.set_aspect('equal')
if colorbar:
plt.colorbar(p1)
if titlestr==None:
plt.title(self._genTitle(self.tstep[0]))
else:
plt.title(titlestr)
return p1
def contourbathy(self,clevs=np.arange(0,3000,100),**kwargs):
p1 = plt.contour(self.lon_rho,self.lat_rho,self.h,clevs,**kwargs)
return p1
    def getTstep(self,tstart,tend,timeformat='%Y%m%d.%H%M'):
        """
        Returns a vector of the time indices between tstart and tend.

        tstart and tend can be strings with format=timeformat
        ['%Y%m%d.%H%M' - default], else datetime objects.
        """
        try:
            t0 = datetime.strptime(tstart,timeformat)
            t1 = datetime.strptime(tend,timeformat)
        except:
            # Assume the time is already in datetime format
            t0 = tstart
            t1 = tend
        n1 = othertime.findNearest(t0,self.time)
        n2 = othertime.findNearest(t1,self.time)
        # NOTE(review): when n1 != n2 the end index n2 is excluded by
        # range(), but the equal case returns it -- confirm intended.
        if n1==n2:
            return [n1,n2]
        else:
            return range(n1,n2)
def _genTitle(self,tstep):
"""
Generates a title for plots
"""
if self.zlayer:
titlestr = '%s [%s]\nz: %6.1f m, %s'%(self.long_name,self.units,self.Z,datetime.strftime(self.time[tstep],'%d-%b-%Y %H:%M:%S'))
else:
titlestr = '%s [%s]\nsigma[%d], %s'%(self.long_name,self.units,self.K[0],datetime.strftime(self.time[tstep],'%d-%b-%Y %H:%M:%S'))
return titlestr
def _checkCoords(self,varname):
"""
Load the x and y coordinates of the present variable, self.varname
"""
#print 'updating coordinate info...'
# check if the variable is in the file to begin
if varname not in self.coordvars:
print 'Warning - variable %s not in file'%varname
varname=self.coordvars[0]
self.varname=varname
C = self.varcoords[varname].split()
self.ndim = len(C)
if self.ndim==1:
return
self.xcoord = C[0]
self.ycoord = C[1]
if self.JRANGE==None:
self.JRANGE = [0,self[self.xcoord].shape[0]+1]
if self.IRANGE==None:
self.IRANGE = [0,self[self.xcoord].shape[1]+1]
# Check the dimension size
if self.JRANGE[1] > self[self.xcoord].shape[0]+1:
print 'Warning JRANGE outside of size range. Setting equal size.'
self.JRANGE[1] = self[self.xcoord].shape[0]+1
if self.IRANGE[1] > self[self.xcoord].shape[1]+1:
print 'Warning JRANGE outside of size range. Setting equal size.'
self.IRANGE[1] = self[self.xcoord].shape[1]+1
self.X = self[self.xcoord][self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]]
self.Y = self[self.ycoord][self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]]
self.xlims = [self.X.min(),self.X.max()]
self.ylims = [self.Y.min(),self.Y.max()]
# Load the long_name and units from the variable
try:
self.long_name = self.nc.variables[varname].long_name
except:
self.long_name = varname
try:
self.units = self.nc.variables[varname].units
except:
self.units = ' '
# Set the grid type
if self.xcoord[-3:]=='rho':
self.gridtype='rho'
self.mask=self.mask_rho
elif self.xcoord[-3:]=='n_u':
self.gridtype='u'
self.mask=self.mask_u
elif self.xcoord[-3:]=='n_v':
self.gridtype='v'
self.mask=self.mask_v
    def _checkVertCoords(self,varname):
        """
        Load the vertical coordinate info for 'varname': the sigma
        coordinate name (self.zcoord), layer count (self.Nz), the layer
        list self.K and the stretching vectors self.S / self.C.
        """
        # First put K into a list
        #if not type(self.K)=='list':
        #    self.K = [self.K]
        try:
            K = self.K[0] # a list
            self.K = self.K
        except:
            # not a list
            self.K = [self.K]
        C = self.varcoords[varname].split()
        ndim = len(C)
        if ndim == 4:
            self.zcoord = C[2]
            self.Nz = len(self[self.zcoord])
            # K == [-99] means every layer
            if self.K[0] == -99:
                self.K = range(0,self.Nz)
            if self.zlayer==True: # Load all layers when zlayer is true
                self.Z = np.array(self.K)
                self.K = range(0,self.Nz)
            if self.zcoord == 's_rho':
                self.S = self.s_rho[self.K]
                self.C = self.Cs_r[self.K]
            elif self.zcoord == 's_w':
                self.S = self.s_w[self.K]
                self.C = self.Cs_w[self.K]
def _readVertCoords(self):
"""
Read the vertical coordinate information
"""
nc = self.nc
self.Cs_r = nc.variables['Cs_r'][:]
self.Cs_w = nc.variables['Cs_w'][:]
self.s_rho = nc.variables['s_rho'][:]
self.s_w = nc.variables['s_w'][:]
self.hc = nc.variables['hc'][:]
self.Vstretching = nc.variables['Vstretching'][:]
self.Vtransform = nc.variables['Vtransform'][:]
def _loadVarCoords(self):
"""
Load the variable coordinates into a dictionary
"""
self.varcoords={}
for vv in self.nc.variables.keys():
if hasattr(self.nc.variables[vv],'coordinates'):
self.varcoords.update({vv:self.nc.variables[vv].coordinates})
    def _openNC(self):
        """
        Open the netCDF source: try a multi-file MFDataset first, fall
        back to a single-file Dataset.
        """
        try:
            self.nc = MFDataset(self.romsfile)
        except:
            # MFDataset rejects some inputs (e.g. a single path string);
            # NOTE(review): the bare except also hides genuine open errors.
            self.nc = Dataset(self.romsfile, 'r')
    def _loadTime(self):
        """
        Load the netcdf 'ocean_time' variable as a vector of datetime
        objects (self.time) and store the step count (self.Nt).
        """
        #nc = Dataset(self.ncfile, 'r', format='NETCDF4')
        nc = self.nc
        t = nc.variables['ocean_time']
        # num2date converts using the variable's own units attribute
        self.time = num2date(t[:],t.units)
        self.Nt = np.size(self.time)
def __getitem__(self,y):
x = self.__dict__.__getitem__(y)
return x
def __setitem__(self,key,value):
if key == 'varname':
self.varname=value
self._checkCoords(value)
else:
self.__dict__[key]=value
class ROMSLagSlice(ROMS):
    """
    ROMS Lagrangian slice class.

    Extracts model fields along a moving track (x, y, time) onto a
    track-following grid that is 'width' metres across with 'nwidth'
    points in the across-track direction.
    """
    def __init__(self,x,y,time,width,nwidth,romsfile,**kwargs):
        """
        x, y, time: track coordinates and times.
        width: across-track extent of the slice.
        nwidth: number of across-track points.
        romsfile: ROMS output file(s) passed to ROMS.__init__.
        """
        # Load the ROMS file
        ROMS.__init__(self,romsfile,**kwargs)
        # Clip points outside of the time and domain limits
        self._clip_points(x,y,time)
        # Create an array with the slice coordinates
        self._create_slice_coords(width,nwidth)
        # Reproject coordinates into distance along- and across-track
        self._project_coords()
    def __call__(self,varname):
        """
        Load the variable 'varname' and interpolate every time step onto
        the slice grid, filling self.slicedata (Nt, ntrack, nwidth).
        """
        # Load the data
        self.loadData(varname=varname,tstep=range(self.Nt))
        # Interpolate onto the time step
        self.slicedata=np.zeros((self.Nt,self.ntrack,self.nwidth))
        print 'Interpolating slice data...'
        for tt in range(self.Nt):
            #print 'Interpolating step %d of %d...'%(tt,self.Nt)
            self.slicedata[tt,...]=\
                self.interp(self.data[tt,...].squeeze())
def interp(self,phi):
"""
Interpolate onto the lagrangian grid
"""
if self.xcoord == 'lon_rho':
xyout = np.array([self.lonslice.ravel(),self.latslice.ravel()]).T
if not self.__dict__.has_key('Frho'):
xy = np.array([self.lon_rho.ravel(),self.lat_rho.ravel()]).T
Frho = interpXYZ(xy, xyout)
F = Frho
elif self.xcoord=='lon_psi':
xyout = np.array([self.lonslice.ravel(),self.latslice.ravel()]).T
if not self.__dict__.has_key('Fpsi'):
xy = np.array([self.lon_psi.ravel(),self.lat_psi.ravel()]).T
Fpsi = interpXYZ(xy, xyout)
F = Fpsi
data = F(phi.ravel())
return data.reshape((self.ntrack,self.nwidth))
    def tinterp(self,dt):
        """
        Interpolate from the lagrangian grid to the timestep along the
        track at t = t0 + dt (linear interpolation in time, per point).
        """
        # Find the high and low indices
        tlow = np.zeros((self.ntrack,),np.int16)
        thigh = np.zeros((self.ntrack,),np.int16)
        for ii in range(self.ntrack):
            ind = np.argwhere(self.track_tsec[ii]+dt>=self.tsec)
            if ind.size>0:
                tlow[ii]=ind[-1]
            else:
                tlow[ii]=0
            # NOTE(review): min(..., self.Nt) can yield index Nt, which is
            # out of bounds for self.tsec/self.slicedata -- self.Nt-1 was
            # probably intended; confirm before changing (it would also
            # make thigh==tlow, a divide-by-zero in w1 below).
            thigh[ii] = min(tlow[ii]+1,self.Nt)
        # Calculate the interpolation weights
        w1 =\
            (self.track_tsec+dt-self.tsec[tlow])/(self.tsec[thigh]-self.tsec[tlow])
        w1 = np.repeat(w1[...,np.newaxis],self.nwidth,axis=-1)
        return (1.-w1)*self.slicedata[tlow,range(self.ntrack),:] +\
            w1*self.slicedata[thigh,range(self.ntrack),:]
    def project(self,lon,lat):
        """
        Projects the coordinates in lon/lat into lagrangian (along/across
        track) coordinates.

        lon/lat may be scalars or 1-D arrays.
        """
        xyin = np.array([self.lonslice.ravel(),self.latslice.ravel()]).T
        xy = np.array([lon,lat]).T
        if len(xy.shape)==1:
            # Promote a single point to a (1, 2) array
            xy = xy[np.newaxis,...]
        # Interpolate the slice's along/across coordinate fields at xy
        F = interpXYZ(xyin, xy)
        return F(self.Xalong.ravel()), F(self.Ycross.ravel())
def pcolor(self,z,**kwargs):
scale=0.001
X = self.Xalong*scale
Y = self.Ycross*scale
ax=plt.gca()
h=plt.pcolormesh(X,Y,z,**kwargs)
ax.set_xlim([X.min(),X.max()])
ax.set_ylim([Y.min(),Y.max()])
return h
def contour(self,z,VV,filled=True,**kwargs):
scale=0.001
X = self.Xalong*scale
Y = self.Ycross*scale
ax=plt.gca()
if filled:
h=plt.contourf(X,Y,z,VV,**kwargs)
else:
h=plt.contour(X,Y,z,VV,**kwargs)
ax.set_xlim([X.min(),X.max()])
ax.set_ylim([Y.min(),Y.max()])
return h
def _clip_points(self,x,y,time):
time = np.array(time)
# Convert both times and check it is inside of the time domain
self.tsec = othertime.SecondsSince(self.time,basetime=self.time[0])
ttrack = othertime.SecondsSince(time,basetime=self.time[0])
indtime = operator.and_(ttrack>=0,ttrack<=self.tsec[-1])
# Check for points inside of the spatial domain
indx = operator.and_(x>=self.X.min(),x<=self.X.max())
indy = operator.and_(y>=self.Y.min(),y<=self.Y.max())
indxy = operator.and_(indx,indy)
ind = operator.and_(indtime,indxy)
self.track_time=time[ind]
self.track_tsec = othertime.SecondsSince(self.track_time,basetime=self.time[0])
self.track_x = x[ind]
self.track_y = y[ind]
self.ntrack = self.track_x.shape[0]
    def _create_slice_coords(self,width,nwidth):
        """
        Create the lagrangian coordinates (self.lonslice/self.latslice,
        each (ntrack, nwidth)). These are for interpolation.
        """
        self.centreline= MyLine([[self.track_x[ii],self.track_y[ii]]\
            for ii in range(self.ntrack)])
        # Compute the normalized distance along the line
        normdist = (self.track_tsec-self.track_tsec[0])\
            /(self.track_tsec[-1]-self.track_tsec[0])
        #P = line.perpendicular(0.4,1.)
        # One perpendicular line of length 'width' per track point
        perplines = [self.centreline.perpline(normdist[ii],width) \
            for ii in range(self.ntrack)]
        self.nwidth=nwidth
        # Initialize the output coordinates
        self.lonslice = np.zeros((self.ntrack,self.nwidth))
        self.latslice = np.zeros((self.ntrack,self.nwidth))
        # Sample nwidth points along each perpendicular
        for ii,ll in enumerate(perplines):
            points = ll.multipoint(self.nwidth)
            for jj,pp in enumerate(points):
                self.lonslice[ii,jj]=pp.x
                self.latslice[ii,jj]=pp.y
    def _project_coords(self):
        """
        Project the slice into along and across track coordinates
        (self.Xalong, self.Ycross). These coordinates are for plotting only.
        """
        def dist(x,x0,y,y0):
            # Euclidean distance between consecutive points
            return np.sqrt( (x-x0)**2. + (y-y0)**2. )
        # Convert the slice to lambert conformal
        LL = np.array([self.lonslice.ravel(),self.latslice.ravel()])
        XY = ll2lcc(LL.T)
        xslice = XY[:,0].reshape((self.ntrack,self.nwidth))
        yslice = XY[:,1].reshape((self.ntrack,self.nwidth))
        # Get the mid-point of the line and calculate the along-track distance
        xmid = xslice[:,self.nwidth//2]
        ymid = yslice[:,self.nwidth//2]
        along_dist = np.zeros((self.ntrack,))
        along_dist[1:] = np.cumsum(dist(xmid[1:],xmid[:-1],ymid[1:],ymid[:-1]))
        # Get the across track distance (centred on the track line)
        xend = xslice[0,:]
        yend = yslice[0,:]
        acrossdist = np.zeros((self.nwidth,))
        acrossdist[1:] = np.cumsum(dist(xend[1:],xend[:-1],yend[1:],yend[:-1]))
        acrossdist -= acrossdist.mean()
        self.Ycross,self.Xalong =np.meshgrid(acrossdist,along_dist)
def interp(self,phi):
"""
Interpolate onto the lagrangian grid
"""
if self.xcoord == 'lon_rho':
xyout = np.array([self.lonslice.ravel(),self.latslice.ravel()]).T
if not self.__dict__.has_key('Frho'):
xy = np.array([self.lon_rho.ravel(),self.lat_rho.ravel()]).T
Frho = interpXYZ(xy, xyout)
F = Frho
elif self.xcoord=='lon_psi':
xyout = np.array([self.lonslice.ravel(),self.latslice.ravel()]).T
if not self.__dict__.has_key('Fpsi'):
xy = np.array([self.lon_psi.ravel(),self.lat_psi.ravel()]).T
Fpsi = interpXYZ(xy, xyout)
F = Fpsi
data = F(phi.ravel())
return data.reshape((self.ntrack,self.nwidth))
class ROMSslice(ROMS):
    """
    Class for slicing ROMS data onto a set of (lon, lat) points.
    """
    def __init__(self,ncfile,lon,lat,**kwargs):
        """
        ncfile: ROMS output file(s); lon/lat: 1-D arrays of slice points.
        """
        ROMS.__init__(self,ncfile,**kwargs)
        # (nslice, 2) array of output point coordinates
        self.xyout = np.array([lon,lat]).T
        self.nslice = self.xyout.shape[0]
    def __call__(self,varname):
        """
        Load 'varname' and interpolate it onto the slice points.

        Returns the squeezed (Nt, Nk, nslice) array; 2-D variables are
        returned directly from the interpolant.
        """
        # Load the data
        dataslice = self.loadData(varname=varname)
        ndim = dataslice.ndim
        # Create the interpolation object
        self.xy = np.array([self.X.ravel(),self.Y.ravel()]).T
        self.F = interpXYZ(self.xy,self.xyout)
        Nt = len(self.tstep)
        Nk = len(self.K)
        # Interpolate onto the output data
        data = np.zeros((Nt,Nk,self.nslice))
        if ndim == 2:
            return self.F(dataslice.ravel())
        elif Nt>1 and Nk==1:
            for tt in range(Nt):
                data[tt,:,:] = self.F(dataslice[tt,:,:].ravel())
        elif Nk>1 and Nt==1:
            # NOTE(review): slicing the squeezed array as [:,kk,:] here
            # assumes dataslice is (Nk, Ny, Nx) -- confirm the intended
            # axis ordering for the single-time, multi-layer case.
            for kk in range(Nk):
                data[:,kk,:] = self.F(dataslice[:,kk,:].ravel())
        else: # 4D array
            for kk in range(Nk):
                for tt in range(Nt):
                    data[tt,kk,:] = self.F(dataslice[tt,kk,:,:].ravel())
        # Zero out netCDF fill values
        data[data>1e36]=0.
        return data.squeeze()
    def lagrangian(self,varname,time):
        """
        Lagrangian slice.

        Returns all of the data at each point along the slice with the
        starting point for each slice beginning at time (one start time
        per slice point); earlier-starting points are padded at the end.
        """
        self.tstep = range(self.Nt)
        data = self.__call__(varname)
        # Find the start time index for each slice point
        t0 = [self.getTstep(tt,tt)[0] for tt in time]
        nt = self.Nt - min(t0)
        sz = (nt,)+data.shape[1::]
        dataout = np.zeros(sz)
        # Shift each point's series so its start time lands at index 0
        for ii in range(self.nslice):
            t1 = self.Nt-t0[ii]
            dataout[0:t1,...,ii] = data[t0[ii]::,...,ii]
        return dataout
class roms_timeseries(ROMS, timeseries):
    """
    Class for loading a timeseries object from ROMS model output.
    """
    IJ = False  # True: XY holds grid indices (I, J) rather than (x, y)
    varname = 'u'  # default variable to load
    zlayer=False  # set True automatically when a fixed depth z is given
    def __init__(self,ncfile,XY,z=None,**kwargs):
        """
        Loads a time series from point X,Y. Set z = None (default) to load all layers

        if self.IJ = True, loads index X=I, Y=J directly
        """
        self.__dict__.update(kwargs)
        self.XY = XY
        self.z = z
        # Initialise the class
        ROMS.__init__(self,ncfile,varname=self.varname,K=[-99])
        self.tstep = range(0,self.Nt) # Load all time steps
        self.update()
    def update(self):
        """
        Refresh the time series: re-resolve coordinates and indices for
        the current varname/XY and reload the data into the underlying
        timeseries object.
        """
        #
        self._checkCoords(self.varname)
        # Load I and J indices from the coordinates
        self.setIJ(self.XY)
        # Load the vertical coordinates
        if not self.z == None:
            self.zlayer = True
        if self.zlayer == False:
            if self.ndim==4:
                # Sigma-layer depths at the selected cell
                self.Z = self.calcDepth()[:,self.JRANGE[0]:self.JRANGE[1],\
                    self.IRANGE[0]:self.IRANGE[1]].squeeze()
        else:
            self.Z = self.z
        # Load the data into a time series object
        timeseries.__init__(self,self.time[self.tstep],self.loadData())
def contourf(self,clevs=20,**kwargs):
"""
z-t contour plot of the time series
"""
h1 = plt.contourf(self.time[self.tstep],self.Z,self.y.T,clevs,**kwargs)
#plt.colorbar()
plt.xticks(rotation=17)
return h1
def setIJ(self,xy):
if self.IJ:
I0 = xy[0]
J0 = xy[1]
else:
ind = self.findNearset(xy[0],xy[1],grid=self.gridtype)
J0=ind[0][0]
I0=ind[0][1]
self.JRANGE = [J0,J0+1]
self.IRANGE = [I0,I0+1]
def __setitem__(self,key,value):
if key == 'varname':
self.varname=value
self.update()
elif key == 'XY':
self.XY = value
self.update()
else:
self.__dict__[key]=value
class roms_subset(roms_grid):
    """
    Class for subsetting ROMS output in space and time.
    """
    gridfile = None  # optional separate grid file; defaults to ncfiles[0]
    def __init__(self,ncfiles,bbox,timelims,**kwargs):
        """
        ncfiles: list of ROMS output files.
        bbox: [x0, x1, y0, y1] spatial bounding box.
        timelims: (tstart, tend) strings formatted '%Y%m%d%H%M%S'.
        """
        self.__dict__.update(kwargs)
        if self.gridfile==None:
            self.gridfile=ncfiles[0]
        self.ncfiles = ncfiles
        self.x0 = bbox[0]
        self.x1 = bbox[1]
        self.y0 = bbox[2]
        self.y1 = bbox[3]
        # Step 1) Find the time steps
        self.t0 = datetime.strptime(timelims[0],'%Y%m%d%H%M%S')
        self.t1 = datetime.strptime(timelims[1],'%Y%m%d%H%M%S')
        # Multifile object
        ftime = MFncdap(ncfiles,timevar='ocean_time')
        ind0 = othertime.findNearest(self.t0,ftime.time)
        ind1 = othertime.findNearest(self.t1,ftime.time)
        self.time = ftime.time[ind0:ind1]
        self.tind,self.fname = ftime(self.time) # list of time indices and corresponding files
        self.Nt = len(self.tind)
        # Step 2) Subset the grid variables
        roms_grid.__init__(self,self.gridfile)
        self.SubsetGrid()
        # Step 3) Read the vertical coordinate variables
        self.ReadVertCoords()
    def SubsetGrid(self):
        """
        Subset the grid variables to the bounding box, storing the corner
        indices (J0, I0)-(J1, I1) and clipped coordinate/mask arrays.
        """
        #Find the grid indices of the box corners
        ind = self.findNearset(self.x0,self.y0)
        self.J0=ind[0][0]
        self.I0=ind[0][1]
        ind = self.findNearset(self.x1,self.y1)
        self.J1=ind[0][0]
        self.I1=ind[0][1]
        # Define the dimensions
        M = self.J1-self.J0
        N = self.I1-self.I0
        self.eta_rho = M
        self.xi_rho = N
        self.eta_psi = M-1
        self.xi_psi = N-1
        self.eta_u = M-1
        self.xi_u = N
        self.eta_v = M
        self.xi_v = N-1
        # Subset the horizontal coordinates (staggered grids are one
        # point smaller in their offset dimension)
        self.lon_rho = self.lon_rho[self.J0:self.J1,self.I0:self.I1]
        self.lat_rho = self.lat_rho[self.J0:self.J1,self.I0:self.I1]
        self.mask_rho = self.mask_rho[self.J0:self.J1,self.I0:self.I1]
        self.lon_psi = self.lon_psi[self.J0:self.J1-1,self.I0:self.I1-1]
        self.lat_psi = self.lat_psi[self.J0:self.J1-1,self.I0:self.I1-1]
        self.mask_psi = self.mask_psi[self.J0:self.J1-1,self.I0:self.I1-1]
        self.lon_u = self.lon_u[self.J0:self.J1-1,self.I0:self.I1]
        self.lat_u = self.lat_u[self.J0:self.J1-1,self.I0:self.I1]
        self.mask_u = self.mask_u[self.J0:self.J1-1,self.I0:self.I1]
        self.lon_v = self.lon_v[self.J0:self.J1,self.I0:self.I1-1]
        self.lat_v = self.lat_v[self.J0:self.J1,self.I0:self.I1-1]
        self.mask_v = self.mask_v[self.J0:self.J1,self.I0:self.I1-1]
        self.h = self.h[self.J0:self.J1,self.I0:self.I1]
        self.angle = self.angle[self.J0:self.J1,self.I0:self.I1]
def ReadVertCoords(self):
"""
"""
nc = Dataset(self.fname[0])
self.Cs_r = nc.variables['Cs_r'][:]
#self.Cs_w = nc.variables['Cs_w'][:]
self.s_rho = nc.variables['s_rho'][:]
#self.s_w = nc.variables['s_w'][:]
self.hc = nc.variables['hc'][:]
self.Vstretching = nc.variables['Vstretching'][:]
self.Vtransform = nc.variables['Vtransform'][:]
nc.close()
    def ReadData(self,tstep):
        """
        Reads the data from the file for the present time step, storing
        the subsetted zeta, temp, salt, u and v arrays as attributes.
        """
        fname = self.fname[tstep]
        t0 = self.tind[tstep]
        print 'Reading data at time: %s...'%datetime.strftime(self.time[tstep],'%Y-%m-%d %H:%M:%S')
        nc = Dataset(fname)
        self.ocean_time = nc.variables['ocean_time'][t0]
        # Index ranges match SubsetGrid: u/v are one point shorter in
        # their staggered dimension
        self.zeta = nc.variables['zeta'][t0,self.J0:self.J1,self.I0:self.I1]
        self.temp = nc.variables['temp'][t0,:,self.J0:self.J1,self.I0:self.I1]
        self.salt = nc.variables['salt'][t0,:,self.J0:self.J1,self.I0:self.I1]
        self.u = nc.variables['u'][t0,:,self.J0:self.J1-1,self.I0:self.I1]
        self.v = nc.variables['v'][t0,:,self.J0:self.J1,self.I0:self.I1-1]
        nc.close()
def Writefile(self,outfile,verbose=True):
"""
Writes subsetted grid and coordinate variables to a netcdf file
Code modified from roms.py in the Octant package
"""
self.outfile = outfile
Mp, Lp = self.lon_rho.shape
M, L = self.lon_psi.shape
N = self.s_rho.shape[0] # vertical layers
pdb.set_trace()
xl = self.lon_rho[self.mask_rho==1.0].ptp()
el = self.lat_rho[self.mask_rho==1.0].ptp()
# Write ROMS grid to file
nc = Dataset(outfile, 'w', format='NETCDF3_CLASSIC')
nc.Description = 'ROMS subsetted history file'
nc.Author = ''
nc.Created = datetime.now().isoformat()
nc.type = 'ROMS HIS file'
nc.createDimension('xi_rho', Lp)
nc.createDimension('xi_u', Lp)
nc.createDimension('xi_v', L)
nc.createDimension('xi_psi', L)
nc.createDimension('eta_rho', Mp)
nc.createDimension('eta_u', M)
nc.createDimension('eta_v', Mp)
nc.createDimension('eta_psi', M)
nc.createDimension('s_rho', N)
nc.createDimension('ocean_time', None)
nc.createVariable('xl', 'f8', ())
nc.variables['xl'].units = 'meters'
nc.variables['xl'] = xl
nc.createVariable('el', 'f8', ())
nc.variables['el'].units = 'meters'
nc.variables['el'] = el
nc.createVariable('spherical', 'S1', ())
nc.variables['spherical'] = 'F'
def write_nc_var(var, name, dimensions, units=None):
nc.createVariable(name, 'f8', dimensions)
if units is not None:
nc.variables[name].units = units
nc.variables[name][:] = var
if verbose:
print ' ... wrote ', name
def create_nc_var(name, dimensions, units=None):
nc.createVariable(name, 'f8', dimensions)
if units is not None:
nc.variables[name].units = units
if verbose:
print ' ... wrote ', name
# Grid variables
write_nc_var(self.angle, 'angle', ('eta_rho', 'xi_rho'))
write_nc_var(self.h, 'h', ('eta_rho', 'xi_rho'), 'meters')
write_nc_var(self.mask_rho, 'mask_rho', ('eta_rho', 'xi_rho'))
write_nc_var(self.mask_u, 'mask_u', ('eta_u', 'xi_u'))
write_nc_var(self.mask_v, 'mask_v', ('eta_v', 'xi_v'))
write_nc_var(self.mask_psi, 'mask_psi', ('eta_psi', 'xi_psi'))
write_nc_var(self.lon_rho, 'lon_rho', ('eta_rho', 'xi_rho'), 'meters')
write_nc_var(self.lat_rho, 'lat_rho', ('eta_rho', 'xi_rho'), 'meters')
write_nc_var(self.lon_u, 'lon_u', ('eta_u', 'xi_u'), 'meters')
write_nc_var(self.lat_u, 'lat_u', ('eta_u', 'xi_u'), 'meters')
write_nc_var(self.lon_v, 'lon_v', ('eta_v', 'xi_v'), 'meters')
write_nc_var(self.lat_v, 'lat_v', ('eta_v', 'xi_v'), 'meters')
write_nc_var(self.lon_psi, 'lon_psi', ('eta_psi', 'xi_psi'), 'meters')
write_nc_var(self.lat_psi, 'lat_psi', ('eta_psi', 'xi_psi'), 'meters')
# Vertical coordinate variables
write_nc_var(self.s_rho, 's_rho', ('s_rho'))
write_nc_var(self.Cs_r, 'Cs_r', ('s_rho'))
write_nc_var(self.hc, 'hc', ())
write_nc_var(self.Vstretching, 'Vstretching', ())
write_nc_var(self.Vtransform, 'Vtransform', ())
# Create the data variables
create_nc_var('ocean_time',('ocean_time'),'seconds since 1970-01-01 00:00:00')
create_nc_var('zeta',('ocean_time','eta_rho','xi_rho'),'meter')
create_nc_var('salt',('ocean_time','s_rho','eta_rho','xi_rho'),'psu')
create_nc_var('temp',('ocean_time','s_rho','eta_rho','xi_rho'),'degrees C')
create_nc_var('u',('ocean_time','s_rho','eta_u','xi_u'),'meter second-1')
create_nc_var('v',('ocean_time','s_rho','eta_v','xi_v'),'meter second-1')
nc.close()
    def Writedata(self, tstep):
        """
        Append the current in-memory fields (ocean_time, zeta, salt,
        temp, u, v) to the output file at index tstep.
        """
        nc = Dataset(self.outfile, 'a')
        nc.variables['ocean_time'][tstep]=self.ocean_time
        nc.variables['zeta'][tstep,:,:]=self.zeta
        nc.variables['salt'][tstep,:,:,:]=self.salt
        nc.variables['temp'][tstep,:,:,:]=self.temp
        nc.variables['u'][tstep,:,:,:]=self.u
        nc.variables['v'][tstep,:,:,:]=self.v
        nc.close()
def Go(self):
"""
Downloads and append each time step to a file
"""
for ii in range(0,self.Nt):
self.ReadData(ii)
self.Writedata(ii)
print '##################\nDone!\n##################'
class roms_interp(roms_grid):
    """
    Class for intperpolating ROMS output in space and time onto
    user-supplied (xi, yi, zi, timei) coordinates.
    """
    utmzone = 15  # UTM zone used when projecting the grid
    isnorth = True  # northern-hemisphere UTM flag
    # Interpolation options
    interpmethod='idw' # 'nn', 'idw', 'kriging', 'griddata'
    NNear=3  # number of neighbours used by the interpolant
    p = 1.0 # power for inverse distance weighting
    # kriging options
    varmodel = 'spherical'
    nugget = 0.1
    sill = 0.8
    vrange = 250.0
    def __init__(self,romsfile, xi, yi, zi, timei, **kwargs):
        """
        romsfile: list of ROMS output files.
        xi, yi: output horizontal coordinates (UTM metres).
        zi: output vertical levels.
        timei: output times (datetime objects).
        kwargs: override any class-level interpolation option.
        """
        self.__dict__.update(kwargs)
        self.romsfile = romsfile
        self.xi = xi
        self.yi = yi
        self.zi = zi
        self.timei = timei
        # Step 1) Find the time steps
        self.t0 = timei[0]
        self.t1 = timei[-1]
        # Multifile object
        ftime = MFncdap(self.romsfile,timevar='ocean_time')
        ind0 = othertime.findNearest(self.t0,ftime.time)
        ind1 = othertime.findNearest(self.t1,ftime.time)
        self.time = ftime.time[ind0:ind1+1]
        self.tind,self.fname = ftime(self.time) # list of time indices and corresponding files
        # Step 2) Prepare the grid variables for the interpolation class
        roms_grid.__init__(self,self.romsfile[0])
        # rho points (wet cells only)
        x,y = self.utmconversion(self.lon_rho,self.lat_rho,self.utmzone,self.isnorth)
        self.xy_rho = np.vstack((x[self.mask_rho==1],y[self.mask_rho==1])).T
        # uv point (averaged onto interior rho points)
        self.mask_uv = self.mask_rho[0:-1,0:-1]
        x = x[0:-1,0:-1]
        y = y[0:-1,0:-1]
        self.xy_uv = np.vstack((x[self.mask_uv==1],y[self.mask_uv==1])).T
        # Step 3) Build the interpolants for rho and uv points
        #self.xy_out = np.hstack((xi,yi))
        #self.xy_out = np.hstack((xi[...,np.newaxis],yi[...,np.newaxis]))
        self.xy_out = np.vstack((xi.ravel(),yi.ravel())).T
        self.Frho = interpXYZ(self.xy_rho,self.xy_out,method=self.interpmethod,NNear=self.NNear,\
            p=self.p,varmodel=self.varmodel,nugget=self.nugget,sill=self.sill,vrange=self.vrange)
        self.Fuv = interpXYZ(self.xy_uv,self.xy_out,method=self.interpmethod,NNear=self.NNear,\
            p=self.p,varmodel=self.varmodel,nugget=self.nugget,sill=self.sill,vrange=self.vrange)
        # Read the vertical coordinate
        self.ReadVertCoords()
        # Dimesions sizes
        self.Nx = self.xy_out.shape[0]
        self.Nz = self.zi.shape[0]
        self.Nt = len(self.timei)
        self.Nz_roms = self.s_rho.shape[0]
        self.Nt_roms = self.time.shape[0]
    def interp(self,zinterp='linear',tinterp='linear',setUV=True,seth=True):
        """
        Performs the interpolation in this order:
            1) Interpolate onto the horizontal coordinates
            2) Interpolate onto the vertical coordinates
            3) Interpolate onto the time coordinates

        zinterp/tinterp: scipy interp1d 'kind' for the vertical and
        temporal passes. setUV/seth toggle velocity and free-surface
        output (disabled outputs are returned as -1).

        Returns (zeta, temp, salt, u, v) on the output grid.
        """
        # Initialise the output arrays @ roms time step
        zetaroms, temproms, saltroms, uroms, vroms = self.initArrays(self.Nt_roms,self.Nx,self.Nz)
        tempold = np.zeros((self.Nz_roms,self.Nx))
        saltold = np.zeros((self.Nz_roms,self.Nx))
        uold = np.zeros((self.Nz_roms,self.Nx))
        vold = np.zeros((self.Nz_roms,self.Nx))
        # Interpolate h
        h = self.Frho(self.h[self.mask_rho==1])
        # Loop through each time step
        for tstep in range(0,self.Nt_roms):
            # Read all variables
            self.ReadData(tstep)
            # Interpolate zeta
            if seth:
                zetaroms[tstep,:] = self.Frho(self.zeta[self.mask_rho==1])
            # Interpolate other 3D variables layer by layer
            for k in range(0,self.Nz_roms):
                tmp = self.temp[k,:,:]
                tempold[k,:] = self.Frho(tmp[self.mask_rho==1])
                tmp = self.salt[k,:,:]
                saltold[k,:] = self.Frho(tmp[self.mask_rho==1])
                if setUV:
                    tmp = self.u[k,:,:]
                    uold[k,:] = self.Fuv(tmp[self.mask_uv==1])
                    tmp = self.v[k,:,:]
                    vold[k,:] = self.Fuv(tmp[self.mask_uv==1])
            ####added by dongyu, adding a low-pass filter####
            # Zero out unphysically large velocities (> vft m/s)
            vft=5
            uold[abs(uold)>vft] = 0.0
            vold[abs(vold)>vft] = 0.0
            #pdb.set_trace()
            # Calculate depths (zeta dependent)
            #zroms = get_depth(self.s_rho,self.Cs_r,self.hc, h, zetaroms[tstep,:], Vtransform=self.Vtransform)
            zroms = get_depth(self.s_rho,self.Cs_r,self.hc, h, zeta=zetaroms[tstep,:], Vtransform=self.Vtransform)
            #pdb.set_trace()
            # Interpolate vertically, one output column at a time
            for ii in range(0,self.Nx):
                y = tempold[:,ii]
                Fz = interpolate.interp1d(zroms[:,ii],y,kind=zinterp,bounds_error=False,fill_value=y[0])
                temproms[tstep,:,ii] = Fz(self.zi)
                y = saltold[:,ii]
                Fz = interpolate.interp1d(zroms[:,ii],y,kind=zinterp,bounds_error=False,fill_value=y[0])
                saltroms[tstep,:,ii] = Fz(self.zi)
                if setUV:
                    y = uold[:,ii]
                    Fz = interpolate.interp1d(zroms[:,ii],y,kind=zinterp,bounds_error=False,fill_value=y[0])
                    uroms[tstep,:,ii] = Fz(self.zi)
                    y = vold[:,ii]
                    Fz = interpolate.interp1d(zroms[:,ii],y,kind=zinterp,bounds_error=False,fill_value=y[0])
                    vroms[tstep,:,ii] = Fz(self.zi)
            #pdb.set_trace()
        # End time loop
        # Initialise the output arrays @ output time step
        # Interpolate temporally
        if self.Nt_roms > 1:
            print 'Temporally interpolating ROMS variables...'
            troms = othertime.SecondsSince(self.time)
            tout = othertime.SecondsSince(self.timei)
            if seth:
                print '\tzeta...'
                Ft = interpolate.interp1d(troms,zetaroms,axis=0,kind=tinterp,bounds_error=False)
                zetaout = Ft(tout)
            else:
                zetaout=-1
            print '\ttemp...'
            Ft = interpolate.interp1d(troms,temproms,axis=0,kind=tinterp,bounds_error=False)
            tempout = Ft(tout)
            print '\tsalt...'
            Ft = interpolate.interp1d(troms,saltroms,axis=0,kind=tinterp,bounds_error=False)
            saltout = Ft(tout)
            if setUV:
                print '\tu...'
                Ft = interpolate.interp1d(troms,uroms,axis=0,kind=tinterp,bounds_error=False)
                uout = Ft(tout)
                print '\tv...'
                Ft = interpolate.interp1d(troms,vroms,axis=0,kind=tinterp,bounds_error=False)
                vout = Ft(tout)
            else:
                uout = vout = -1
            #pdb.set_trace()
        else:
            # Single model step: no temporal interpolation needed
            zetaout = zetaroms
            tempout = temproms
            saltout = saltroms
            uout = uroms
            vout = vroms
        return zetaout, tempout, saltout, uout, vout
def initArrays(self,Nt,Nx,Nz):
zetaout = np.zeros((Nt,Nx))
tempout = np.zeros((Nt,Nz,Nx))
saltout = np.zeros((Nt,Nz,Nx))
uout = np.zeros((Nt,Nz,Nx))
vout = np.zeros((Nt,Nz,Nx))
return zetaout, tempout, saltout, uout, vout
    def ReadData(self,tstep):
        """
        Reads the data from the file for the present time step

        Looks up the source file and in-file time index from
        self.fname / self.tind, loads zeta, temp, salt, u and v for
        that step, and stores them on self.  Velocities are averaged
        onto the rho points and rotated to east/north components.
        """
        fname = self.fname[tstep]
        t0 = self.tind[tstep]
        print 'Interpolating data at time: %s of %s...'%(datetime.strftime(self.time[tstep],'%Y-%m-%d %H:%M:%S'),\
            datetime.strftime(self.time[-1],'%Y-%m-%d %H:%M:%S'))
        nc = Dataset(fname)
        self.ocean_time = nc.variables['ocean_time'][t0]
        self.zeta = nc.variables['zeta'][t0,:,:]
        self.temp = nc.variables['temp'][t0,:,:,:]
        self.salt = nc.variables['salt'][t0,:,:,:]
        u = nc.variables['u'][t0,:,:,:]
        v = nc.variables['v'][t0,:,:,:]
        nc.close()
        # Rotate the vectors
        # Adjacent u (and v) faces are averaged onto cell centres before rotating
        # by the grid angle; the trailing row/column of angle is dropped to match.
        self.u,self.v = rotateUV( (u[...,:,0:-1]+u[...,:,1::])*0.5,(v[...,0:-1,:]+v[...,1::,:])*0.5,self.angle[0:-1,0:-1])
    def ReadVertCoords(self):
        """
        Read the ROMS vertical (s-coordinate) stretching variables from the
        first input file and store them on self: Cs_r, s_rho, hc,
        Vstretching and Vtransform.  These are grid constants, so reading
        them once from self.romsfile[0] is sufficient.
        """
        nc = Dataset(self.romsfile[0])
        self.Cs_r = nc.variables['Cs_r'][:]
        #self.Cs_w = nc.variables['Cs_w'][:]
        self.s_rho = nc.variables['s_rho'][:]
        #self.s_w = nc.variables['s_w'][:]
        self.hc = nc.variables['hc'][:]
        self.Vstretching = nc.variables['Vstretching'][:]
        self.Vtransform = nc.variables['Vtransform'][:]
        nc.close()
class MFncdap(object):
    """
    Multi-file class for opendap netcdf files
    MFDataset module is not compatible with opendap data

    Builds a lookup of per-file time vectors so that, given a list of
    datetimes, the matching file and the index of the nearest time step
    within it can be found via __call__.
    """
    # Name of the time variable; override via keyword (e.g. 'ocean_time')
    timevar = 'time'
    def __init__(self,ncfilelist,**kwargs):
        self.__dict__.update(kwargs)
        self.timelookup = {}
        self.time = np.zeros((0,))
        for f in ncfilelist:
            print f
            nc = Dataset(f)
            t = nc.variables[self.timevar]
            time = num2date(t[:],t.units)
            nc.close()
            # Map each filename to its own time vector, and also keep a
            # concatenated time vector across all files.
            self.timelookup.update({f:time})
            self.time = np.hstack((self.time,np.asarray(time)))
        self.time = np.asarray(self.time)
    def __call__(self,time):
        """
        Return the filenames and time index of the closest time

        For each datetime in `time`, scans the per-file time ranges and
        appends the nearest index and owning filename.  Times outside
        every file's range are silently skipped, so the returned lists
        may be shorter than the input.
        """
        fname = []
        tind =[]
        for t in time:
            flag=1
            for f in self.timelookup.keys():
                if t >= self.timelookup[f][0] and t<=self.timelookup[f][-1]:
                    #print 'Found tstep %s'%datetime.strptime(t,'%Y-%m-%d %H:%M:%S')
                    tind.append(othertime.findNearest(t,self.timelookup[f][:]))
                    fname.append(f)
                    flag=0
            #if flag:
            #    print 'Warning - could not find matching file for time:%s'%datetime.strptime(t,'%Y-%m-%d %H:%M:%S')
            #    tind.append(-1)
            #    fname.append(-1)
        return tind, fname
def get_depth(S,C,hc,h,zeta=None, Vtransform=1):
    """
    Calculates the sigma coordinate depth

    Parameters
    ----------
    S : 1-D array
        s-coordinate values at each vertical level (length N).
    C : 1-D array
        Stretching curve values at each vertical level (length N).
    hc : float
        Critical depth parameter.
    h : array
        Bathymetry at each horizontal point.
    zeta : array, optional
        Free-surface elevation, same shape as `h`.  Defaults to zero.
    Vtransform : {1, 2}
        ROMS vertical transformation equation.  Any other value returns
        an all-zero array (unchanged legacy behaviour).

    Returns
    -------
    z : array with shape (N,) + h.shape
    """
    # BUGFIX: was `if zeta == None`, which compares elementwise when zeta is
    # an ndarray and raises "truth value of an array is ambiguous".
    if zeta is None:
        zeta = 0.0*h
    N = len(S)
    #Nj,Ni = np.size(h)
    shp = (N,)+h.shape
    z = np.zeros(shp)
    if Vtransform == 1:
        for k in range(0,N):
            z0 = (S[k]-C[k])*hc + C[k]*h
            z[k,...] = z0 + (zeta *(1.0 + z0/h))
    elif Vtransform == 2:
        for k in range(0,N):
            z0 = (hc*S[k]+C[k]*h)/(hc+h)
            z[k,...] = zeta + (zeta+h)*z0
    return z
def rotateUV(uroms,vroms,ang):
    """
    Rotates ROMS output vectors to cartesian u,v

    Applies the standard 2-D rotation by grid angle `ang` (radians).
    """
    cos_a = np.cos(ang)
    sin_a = np.sin(ang)
    u_cart = uroms * cos_a - vroms * sin_a
    v_cart = uroms * sin_a + vroms * cos_a
    return u_cart, v_cart
###############
## Testing
##grdfile = 'http://barataria.tamu.edu:8080/thredds/dodsC/txla_nesting6_grid/txla_grd_v4_new.nc'
#grdfile = 'C:\\Projects\\GOMGalveston\\MODELLING\\ROMS\\txla_grd_v4_new.nc'
##grd = roms_grid(grdfile)
#
##ncfiles = ['http://barataria.tamu.edu:8080/thredds/dodsC/txla_nesting6/ocean_his_%04d.nc'%i for i in range(1,3)]
##MF = MFncdap(ncfiles,timevar='ocean_time')
##
##tsteps = [datetime(2003,2,16)+timedelta(hours=i*4) for i in range(0,24)]
##tind,fname = MF(tsteps)
#
#ncfiles = ['http://barataria.tamu.edu:8080/thredds/dodsC/txla_nesting6/ocean_his_%04d.nc'%i for i in range(100,196)]
#timelims = ('20090501000000','20090701000000')
##timelims = ('20090501000000','20090502000000')
#bbox = [-95.53,-94.25,28.3,30.0]
#
#roms = roms_subset(ncfiles,bbox,timelims,gridfile=grdfile)
#outfile = 'C:\\Projects\\GOMGalveston\\MODELLING\\ROMS\\txla_subset_HIS_MayJun2009.nc'
#roms.Writefile(outfile)
#roms.Go()
#
##roms2 = roms_subset([outfile],bbox,timelims)
|
mit
|
h2educ/scikit-learn
|
examples/svm/plot_separating_hyperplane.py
|
294
|
1273
|
"""
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
nens/gislib
|
gislib/colors.py
|
1
|
9787
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from matplotlib import cm
from matplotlib import colors
LANDUSE = {
1: '1 - BAG - Overig / Onbekend',
2: '2 - BAG - Woonfunctie',
3: '3 - BAG - Celfunctie',
4: '4 - BAG - Industriefunctie',
5: '5 - BAG - Kantoorfunctie',
6: '6 - BAG - Winkelfunctie',
7: '7 - BAG - Kassen',
8: '8 - BAG - Logiesfunctie',
9: '9 - BAG - Bijeenkomstfunctie',
10: '10 - BAG - Sportfunctie',
11: '11 - BAG - Onderwijsfunctie',
12: '12 - BAG - Gezondheidszorgfunctie',
13: '13 - BAG - Overig kleiner dan 50 (schuurtjes)',
14: '14 - BAG - Overig groter dan 50 (bedrijfspanden)',
15: '15 - BAG - None',
16: '16 - BAG - None',
17: '17 - BAG - None',
18: '18 - BAG - None',
19: '19 - BAG - None',
20: '20 - BAG - None',
21: '21 - Top10 - Water',
22: '22 - Top10 - Primaire wegen',
23: '23 - Top10 - Secundaire wegen',
24: '24 - Top10 - Tertiaire wegen',
25: '25 - Top10 - Bos/Natuur',
26: '26 - Top10 - Bebouwd gebied',
27: '27 - Top10 - Boomgaard',
28: '28 - Top10 - Fruitkwekerij',
29: '29 - Top10 - Begraafplaats',
30: '30 - Top10 - Agrarisch gras',
31: '31 - Top10 - Overig gras',
32: '32 - Top10 - Spoorbaanlichaam',
33: '33 - Top10 - None',
34: '34 - Top10 - None',
35: '35 - Top10 - None',
36: '36 - Top10 - None',
37: '37 - Top10 - None',
38: '38 - Top10 - None',
39: '39 - Top10 - None',
40: '40 - Top10 - None',
41: '41 - LGN - Agrarisch Gras',
42: '42 - LGN - Mais',
43: '43 - LGN - Aardappelen',
44: '44 - LGN - Bieten',
45: '45 - LGN - Granen',
46: '46 - LGN - Overige akkerbouw',
47: '47 - LGN - None',
48: '48 - LGN - Glastuinbouw',
49: '49 - LGN - Boomgaard',
50: '50 - LGN - Bloembollen',
51: '51 - LGN - None',
52: '52 - LGN - Gras overig',
53: '53 - LGN - Bos/Natuur',
54: '54 - LGN - None',
55: '55 - LGN - None',
56: '56 - LGN - Water (LGN)',
57: '57 - LGN - None',
58: '58 - LGN - Bebouwd gebied',
59: '59 - LGN - None',
61: '61 - CBS - Spoorwegen terrein',
62: '62 - CBS - Primaire wegen',
63: '63 - CBS - Woongebied',
64: '64 - CBS - Winkelgebied',
65: '65 - CBS - Bedrijventerrein',
66: '66 - CBS - Sportterrein',
67: '67 - CBS - Volkstuinen',
68: '68 - CBS - Recreatief terrein',
69: '69 - CBS - Glastuinbouwterrein',
70: '70 - CBS - Bos/Natuur',
71: '71 - CBS - Begraafplaats',
72: '72 - CBS - Zee',
73: '73 - CBS - Zoet water',
74: '74 - CBS - None',
75: '75 - CBS - None',
76: '76 - CBS - None',
77: '77 - CBS - None',
78: '78 - CBS - None',
79: '79 - CBS - None',
97: '97 - Overig - buitenland',
98: '98 - Top10 - erf',
99: '99 - Overig - Overig/Geen landgebruik'
}
# RGB tuples (0-1 floats) for the simplified Beira land-use classes;
# consumed by add_cmap_landuse_beira() below.
LC_LANDUSE_BEIRA = [
    (0.84, 1.0, 0.74),
    (0.19, 0.08, 0.81),
    (1.0, 0.49, 0.48),
    (1.0, 0.49, 0.48),
    (0.61, 0.62, 0.61),
]
LC_LANDUSE = [
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.741, 0.235, 0.322, 1.0),
(0.969, 0.765, 0.776, 1.0),
(0.969, 0.349, 0.224, 1.0),
(0.969, 0.22, 0.353, 1.0),
(0.808, 0.525, 0.58, 1.0),
(0.969, 0.588, 0.482, 1.0),
(0.741, 0.204, 0.192, 1.0),
(1.0, 0.412, 0.518, 1.0),
(0.969, 0.427, 0.388, 1.0),
(0.741, 0.443, 0.388, 1.0),
(0.741, 0.588, 0.549, 1.0),
(0.741, 0.235, 0.322, 1.0),
(0.741, 0.235, 0.322, 1.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.443, 1.0, 1.0),
(0.192, 0.204, 0.192, 1.0),
(0.42, 0.412, 0.42, 1.0),
(0.71, 0.698, 0.71, 1.0),
(0.0, 0.443, 0.29, 1.0),
(0.706, 0.706, 0.706, 1.0),
(0.451, 0.443, 0.0, 1.0),
(0.451, 0.443, 0.0, 1.0),
(0.906, 0.89, 0.906, 1.0),
(0.647, 1.0, 0.451, 1.0),
(0.647, 1.0, 0.451, 1.0),
(0.0, 0.0, 0.0, 1.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.647, 1.0, 0.451, 1.0),
(1.0, 1.0, 0.451, 1.0),
(0.808, 0.667, 0.388, 1.0),
(1.0, 0.0, 0.776, 1.0),
(0.906, 0.906, 0.0, 1.0),
(0.451, 0.302, 0.0, 1.0),
(0.0, 0.0, 0.0, 0.0),
(0.867, 1.0, 0.722, 1.0),
(0.451, 0.443, 0.0, 1.0),
(0.871, 0.443, 1.0, 1.0),
(0.0, 0.0, 0.0, 0.0),
(0.322, 1.0, 0.0, 1.0),
(0.0, 0.443, 0.29, 1.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.706, 0.706, 0.706, 1.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 1.0),
(0.0, 0.0, 0.0, 0.0),
(0.698, 0.929, 0.698, 1.0),
(0.588, 0.588, 0.588, 1.0),
(0.671, 0.671, 0.671, 1.0),
(0.0, 1.0, 0.0, 1.0),
(0.451, 0.541, 0.259, 1.0),
(0.224, 0.667, 0.0, 1.0),
(0.867, 1.0, 0.722, 1.0),
(0.0, 0.443, 0.29, 1.0),
(0.906, 0.89, 0.906, 1.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 0.0),
(0.906, 0.89, 0.906, 1.0),
]
def add_cmap_transparent(name):
    """ Create and register a transparent colormap. """
    fully_transparent = (0, 0, 0, 0)  # RGBA: black with alpha 0
    cm.register_cmap(name, colors.ListedColormap([fully_transparent]))
def add_cmap_damage(name):
    """ Create and register damage colormap.

    Black -> red/yellow ramp that becomes opaque almost immediately;
    factor f squeezes the colour transitions into the low end of the range.
    """
    f = 0.05
    # Matplotlib segmentdata: each row is (x, y_left, y_right).
    cdict = {'red': [(0.0, 0, 0),
                     (00.01 * f, 0, 1),
                     (1.0, 1, 1)],
             'green': [(0., 0.00, 0.00),
                       (00.01 * f, 0.00, 1.00),
                       (00.50 * f, 1.00, 0.65),
                       (10.00 * f, 0.65, 0.00),
                       (1., 0.00, 0.00)],
             'blue': [(0., 0, 0),
                      (1., 0, 0)],
             'alpha': [(0., 0, 0),
                       (00.01 * f, 0, 1),
                       (1., 1, 1)]}
    cmap = colors.LinearSegmentedColormap('', cdict)
    cm.register_cmap(name, cmap)
def add_cmap_shade(name, ctr):
    """ Create and register shade colormap.

    Black below `ctr`, white above, fully transparent at `ctr` itself.
    The 9.9 entries are the unused outer y-values of the first/last
    segment rows (matplotlib ignores y_left at x=0 and y_right at x=1).
    """
    cdict = {'red': [(0.0, 9.9, 0.0),
                     (ctr, 0.0, 1.0),
                     (1.0, 1.0, 9.9)],
             'green': [(0.0, 9.9, 0.0),
                       (ctr, 0.0, 1.0),
                       (1.0, 1.0, 9.9)],
             'blue': [(0.0, 9.9, 0.0),
                      (ctr, 0.0, 1.0),
                      (1.0, 1.0, 9.9)],
             'alpha': [(0.0, 9.9, 1.0),
                       (ctr, 0.0, 0.0),
                       (1.0, 1.0, 9.9)]}
    cmap = colors.LinearSegmentedColormap('', cdict)
    cm.register_cmap(name, cmap)
def add_cmap_drought(name):
    """ Create and register drought colormap.

    Piecewise-linear map with 16 colour bands (breakpoints every 1/16 of
    the range); each segmentdata row is (x, y_left, y_right).
    """
    cdict = {
        'red': [
            (0.0, 0.0, 0.0),
            (0.062, 0.0, 0.157),
            (0.125, 0.157, 0.51),
            (0.188, 0.51, 0.749),
            (0.25, 0.749, 0.149),
            (0.312, 0.149, 0.357),
            (0.375, 0.357, 0.576),
            (0.438, 0.576, 0.827),
            (0.5, 0.827, 0.902),
            (0.562, 0.902, 0.949),
            (0.625, 0.949, 0.98),
            (0.688, 0.98, 1.0),
            (0.75, 1.0, 0.659),
            (0.812, 0.659, 0.8),
            (0.875, 0.8, 0.91),
            (0.938, 0.91, 1.0),
            (1.0, 1.0, 0.0)
        ],
        'green': [
            (0.0, 0.0, 0.149),
            (0.062, 0.149, 0.314),
            (0.125, 0.314, 0.533),
            (0.188, 0.533, 0.824),
            (0.25, 0.824, 0.451),
            (0.312, 0.451, 0.62),
            (0.375, 0.62, 0.8),
            (0.438, 0.8, 1.0),
            (0.5, 1.0, 0.902),
            (0.562, 0.902, 0.941),
            (0.625, 0.941, 0.965),
            (0.688, 0.965, 1.0),
            (0.75, 1.0, 0.0),
            (0.812, 0.0, 0.286),
            (0.875, 0.286, 0.502),
            (0.938, 0.502, 0.749),
            (1.0, 0.749, 0.0)
        ],
        'blue': [
            (0.0, 0.0, 0.451),
            (0.062, 0.451, 0.631),
            (0.125, 0.631, 0.812),
            (0.188, 0.812, 1.0),
            (0.25, 1.0, 0.0),
            (0.312, 0.0, 0.243),
            (0.375, 0.243, 0.471),
            (0.438, 0.471, 0.749),
            (0.5, 0.749, 0.0),
            (0.562, 0.0, 0.322),
            (0.625, 0.322, 0.529),
            (0.688, 0.529, 0.749),
            (0.75, 0.749, 0.0),
            (0.812, 0.0, 0.208),
            (0.875, 0.208, 0.447),
            (0.938, 0.447, 0.749),
            (1.0, 0.749, 0.0)
        ],
    }
    cmap = colors.LinearSegmentedColormap('', cdict)
    cm.register_cmap(name, cmap)
def add_cmap_landuse(name):
    """ Create and register the land-use colormap built from LC_LANDUSE. """
    cm.register_cmap(name, colors.ListedColormap(LC_LANDUSE))
def add_cmap_landuse_beira(name):
    """ Create and register the Beira land-use colormap from LC_LANDUSE_BEIRA. """
    cm.register_cmap(name, colors.ListedColormap(LC_LANDUSE_BEIRA))
|
gpl-3.0
|
mitdbg/modeldb
|
client/verta/verta/_internal_utils/_utils.py
|
1
|
30525
|
# -*- coding: utf-8 -*-
import datetime
import glob
import inspect
import json
import numbers
import os
import re
import string
import subprocess
import sys
import threading
import time
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value, ListValue, Struct, NULL_VALUE
from ..external import six
from ..external.six.moves.urllib.parse import urljoin # pylint: disable=import-error, no-name-in-module
from .._protos.public.modeldb import CommonService_pb2 as _CommonService
try:
import pandas as pd
except ImportError: # pandas not installed
pd = None
try:
import tensorflow as tf
except ImportError: # TensorFlow not installed
tf = None
try:
import ipykernel
except ImportError: # Jupyter not installed
pass
else:
try:
from IPython.display import Javascript, display
try: # Python 3
from notebook.notebookapp import list_running_servers
except ImportError: # Python 2
import warnings
from IPython.utils.shimmodule import ShimWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ShimWarning)
from IPython.html.notebookapp import list_running_servers
del warnings, ShimWarning # remove ad hoc imports from scope
except ImportError: # abnormally nonstandard installation of Jupyter
pass
try:
import numpy as np
except ImportError: # NumPy not installed
np = None
BOOL_TYPES = (bool,)
else:
BOOL_TYPES = (bool, np.bool_)
_GRPC_PREFIX = "Grpc-Metadata-"
_VALID_HTTP_METHODS = {'GET', 'POST', 'PUT', 'DELETE'}
_VALID_FLAT_KEY_CHARS = set(string.ascii_letters + string.digits + '_-/')
THREAD_LOCALS = threading.local()
THREAD_LOCALS.active_experiment_run = None
SAVED_MODEL_DIR = "/app/tf_saved_model/"
class Connection:
    def __init__(self, scheme=None, socket=None, auth=None, max_retries=0, ignore_conn_err=False):
        """
        HTTP connection configuration utility struct.
        Parameters
        ----------
        scheme : {'http', 'https'}, optional
            HTTP authentication scheme.
        socket : str, optional
            Hostname and port.
        auth : dict, optional
            Verta authentication headers.
        max_retries : int, default 0
            Maximum number of times to retry a request on a connection failure. This only attempts retries
            on HTTP codes {502, 503, 504} which commonly occur during back end connection lapses.
        ignore_conn_err : bool, default False
            Whether to ignore connection errors and instead return successes with empty contents.
        """
        self.scheme = scheme
        self.socket = socket
        self.auth = auth
        # TODO: retry on 404s, but only if we're sure it's not legitimate e.g. from a GET
        # Retry policy consumed by make_request() via an HTTPAdapter mount.
        self.retry = Retry(total=max_retries,
                           backoff_factor=1,  # each retry waits (2**retry_num) seconds
                           method_whitelist=False,  # retry on all HTTP methods
                           status_forcelist=(502, 503, 504),  # only retry on these status codes
                           raise_on_redirect=False,  # return Response instead of raising after max retries
                           raise_on_status=False)  # return Response instead of raising after max retries
        self.ignore_conn_err = ignore_conn_err
class Configuration:
    def __init__(self, use_git=True, debug=False):
        """
        Client behavior configuration utility struct.

        Parameters
        ----------
        use_git : bool, default True
            Whether to use a local Git repository for certain operations.
        debug : bool, default False
            Whether to enable debug behavior.
        """
        self.debug = debug
        self.use_git = use_git
class LazyList(object):
    """
    Abstract sequence that fetches its elements from the back end on demand,
    one page at a time.  Subclasses implement _get_records() and
    _create_element().
    """
    # number of items to fetch per back end call in __iter__()
    _ITER_PAGE_LIMIT = 100
    def __init__(self, conn, conf, msg, endpoint, rest_method):
        self._conn = conn
        self._conf = conf
        self._msg = msg  # protobuf msg used to make back end calls
        self._endpoint = endpoint
        self._rest_method = rest_method
    def __getitem__(self, index):
        """Fetch a single element by (possibly negative) integer index."""
        if isinstance(index, int):
            # copy msg to avoid mutating `self`'s state
            msg = self._msg.__class__()
            msg.CopyFrom(self._msg)
            msg.page_limit = 1  # one record per page => page_number acts as the index
            if index >= 0:
                # convert zero-based indexing into page number
                msg.page_number = index + 1
            else:
                # reverse page order to index from end
                msg.ascending = not msg.ascending  # pylint: disable=no-member
                msg.page_number = abs(index)
            response_msg = self._call_back_end(msg)
            records = self._get_records(response_msg)
            if (not records
                    and msg.page_number > response_msg.total_records):  # pylint: disable=no-member
                raise IndexError("index out of range")
            id_ = records[0].id
            return self._create_element(id_)
        else:
            raise TypeError("index must be integer, not {}".format(type(index)))
    def __iter__(self):
        """Yield elements lazily, paging through the back end and de-duplicating IDs."""
        # copy msg to avoid mutating `self`'s state
        msg = self._msg.__class__()
        msg.CopyFrom(self._msg)
        msg.page_limit = self._ITER_PAGE_LIMIT
        msg.page_number = 0  # this will be incremented as soon as we enter the loop
        seen_ids = set()
        total_records = float('inf')  # refreshed from each response; inf guarantees first iteration
        while msg.page_limit*msg.page_number < total_records:  # pylint: disable=no-member
            msg.page_number += 1  # pylint: disable=no-member
            response_msg = self._call_back_end(msg)
            total_records = response_msg.total_records
            ids = self._get_ids(response_msg)
            for id_ in ids:
                # skip if we've seen the ID before
                if id_ in seen_ids:
                    continue
                else:
                    seen_ids.add(id_)
                    yield self._create_element(id_)
    def __len__(self):
        """Return the back end's total record count (requires one minimal request)."""
        # copy msg to avoid mutating `self`'s state
        msg = self._msg.__class__()
        msg.CopyFrom(self._msg)
        msg.page_limit = msg.page_number = 1  # minimal request just to get total_records
        response_msg = self._call_back_end(msg)
        return response_msg.total_records
    def _call_back_end(self, msg):
        """Send `msg` to the configured endpoint and parse the protobuf response."""
        data = proto_to_json(msg)
        if self._rest_method == "GET":
            response = make_request(
                self._rest_method,
                self._endpoint.format(self._conn.scheme, self._conn.socket),
                self._conn, params=data,
            )
        elif self._rest_method == "POST":
            response = make_request(
                self._rest_method,
                self._endpoint.format(self._conn.scheme, self._conn.socket),
                self._conn, json=data,
            )
        # NOTE(review): other rest_methods leave `response` unbound -> NameError;
        # callers only pass GET/POST today.
        raise_for_http_error(response)
        response_msg = json_to_proto(response.json(), msg.Response)
        return response_msg
    def _get_ids(self, response_msg):
        return (record.id for record in self._get_records(response_msg))
    def _get_records(self, response_msg):
        """Get the attribute of `response_msg` that is not `total_records`."""
        raise NotImplementedError
    def _create_element(self, id_):
        """Instantiate element to return to user."""
        raise NotImplementedError
def make_request(method, url, conn, **kwargs):
    """
    Makes a REST request.
    Parameters
    ----------
    method : {'GET', 'POST', 'PUT', 'DELETE'}
        HTTP method.
    url : str
        URL.
    conn : Connection
        Connection authentication and configuration.
    **kwargs
        Parameters to requests.request().
    Returns
    -------
    requests.Response
        The real response on success; a fabricated empty 200 response when a
        connection error (or error status) occurred and `conn.ignore_conn_err`
        is set.
    """
    if method.upper() not in _VALID_HTTP_METHODS:
        raise ValueError("`method` must be one of {}".format(_VALID_HTTP_METHODS))
    if conn.auth is not None:
        # add auth to `kwargs['headers']`
        kwargs.setdefault('headers', {}).update(conn.auth)
    with requests.Session() as s:
        # mount the Connection's retry policy for this URL prefix
        s.mount(url, HTTPAdapter(max_retries=conn.retry))
        try:
            response = s.request(method, url, **kwargs)
        except (requests.exceptions.BaseHTTPError,
                requests.exceptions.RequestException) as e:
            if not conn.ignore_conn_err:
                raise e
            # else: fall through to the fabricated response below
        else:
            if response.ok or not conn.ignore_conn_err:
                return response
            # else: non-OK status with ignore_conn_err -> fabricate below
    # fabricate response
    response = requests.Response()
    response.status_code = 200  # success
    response._content = six.ensure_binary("{}")  # empty contents
    return response
def raise_for_http_error(response):
    """
    Raises a potential HTTP error with a back end message if provided, or a default error message otherwise.
    Parameters
    ----------
    response : :class:`requests.Response`
        Response object returned from a `requests`-module HTTP request.
    Raises
    ------
    :class:`requests.HTTPError`
        If an HTTP error occured.
    """
    try:
        response.raise_for_status()
    except requests.HTTPError as e:
        try:
            reason = response.json()['message']
        except (ValueError,  # not JSON response
                KeyError):  # no 'message' from back end
            six.raise_from(e, None)  # use default reason
        else:
            # replicate https://github.com/psf/requests/blob/428f7a/requests/models.py#L954
            # but substitute the back end's `message` for requests' generic reason
            if 400 <= response.status_code < 500:
                cause = "Client"
            elif 500 <= response.status_code < 600:
                cause = "Server"
            else:  # should be impossible here, but sure okay
                cause = "Unexpected"
            message = "{} {} Error: {} for url: {}".format(response.status_code, cause, reason, response.url)
            six.raise_from(requests.HTTPError(message, response=response), None)
def is_hidden(path):
    """Return whether *path*'s basename starts with a dot; "." itself is not hidden."""
    if path == ".":  # avoid "./".startswith('.')
        return False
    basename = os.path.basename(path.rstrip('/'))
    return basename.startswith('.')
def find_filepaths(paths, extensions=None, include_hidden=False, include_venv=False):
    """
    Unravels a list of file and directory paths into a list of only filepaths by walking through the
    directories.
    Parameters
    ----------
    paths : str or list of str
        File and directory paths.
    extensions : str or list of str, optional
        What files to include while walking through directories. If not provided, all files will be
        included.
    include_hidden : bool, default False
        Whether to include hidden files and subdirectories found while walking through directories.
    include_venv : bool, default False
        Whether to include Python virtual environment directories.
    Returns
    -------
    filepaths : set
    """
    if isinstance(paths, six.string_types):
        paths = [paths]
    paths = list(map(os.path.expanduser, paths))
    if isinstance(extensions, six.string_types):
        extensions = [extensions]
    if extensions is not None:
        # prepend period to file extensions where missing
        extensions = map(lambda ext: ext if ext.startswith('.') else ('.' + ext), extensions)
        extensions = set(extensions)
    filepaths = set()
    for path in paths:
        if os.path.isdir(path):
            for parent_dir, dirnames, filenames in os.walk(path):
                if not include_hidden:
                    # skip hidden directories
                    # (in-place slice assignment so os.walk doesn't descend into them)
                    dirnames[:] = [dirname for dirname in dirnames if not is_hidden(dirname)]
                    # skip hidden files
                    filenames[:] = [filename for filename in filenames if not is_hidden(filename)]
                if not include_venv:
                    # a virtualenv is detected by the presence of <dir>/bin/python*
                    exec_path_glob = os.path.join(parent_dir, "{}", "bin", "python*")
                    dirnames[:] = [dirname for dirname in dirnames if not glob.glob(exec_path_glob.format(dirname))]
                for filename in filenames:
                    if extensions is None or os.path.splitext(filename)[1] in extensions:
                        filepaths.add(os.path.join(parent_dir, filename))
        else:
            # non-directory paths are taken as-is, with no extension filtering
            filepaths.add(path)
    return filepaths
def proto_to_json(msg):
    """
    Converts a `protobuf` `Message` object into a JSON-compliant dictionary.
    The output preserves snake_case field names and integer representaions of enum variants.
    Parameters
    ----------
    msg : google.protobuf.message.Message
        `protobuf` `Message` object.
    Returns
    -------
    dict
        JSON object representing `msg`.
    """
    json_str = json_format.MessageToJson(
        msg,
        including_default_value_fields=True,
        preserving_proto_field_name=True,
        use_integers_for_enums=True,
    )
    return json.loads(json_str)
def json_to_proto(response_json, response_cls, ignore_unknown_fields=True):
    """
    Converts a JSON-compliant dictionary into a `protobuf` `Message` object.
    Parameters
    ----------
    response_json : dict
        JSON object representing a Protocol Buffer message.
    response_cls : type
        `protobuf` `Message` subclass, e.g. ``CreateProject.Response``.
    ignore_unknown_fields : bool, default True
        Whether to allow (and ignore) fields in `response_json` that are not defined in
        `response_cls`. This is for forward compatibility with the back end; if the Client protos
        are outdated and we get a response with new fields, ``True`` prevents an error.
    Returns
    -------
    google.protobuf.message.Message
        `protobuf` `Message` object represented by `response_json`.
    """
    serialized = json.dumps(response_json)
    return json_format.Parse(
        serialized,
        response_cls(),
        ignore_unknown_fields=ignore_unknown_fields,
    )
def to_builtin(obj):
    """
    Tries to coerce `obj` into a built-in type, for JSON serialization.
    Parameters
    ----------
    obj
    Returns
    -------
    object
        A built-in equivalent of `obj`, or `obj` unchanged if it could not be handled by this function.
    """
    # jump through ludicrous hoops to avoid having hard dependencies in the Client
    # (types are recognized by class/module *name*, so numpy/pandas/torch need not be importable)
    cls_ = obj.__class__
    obj_class = getattr(cls_, '__name__', None)
    obj_module = getattr(cls_, '__module__', None)
    # booleans
    if isinstance(obj, BOOL_TYPES):
        return True if obj else False
    # NumPy scalars
    if obj_module == "numpy" and obj_class.startswith(('int', 'uint', 'float', 'str')):
        return obj.item()
    # scientific library collections
    if obj_class == "ndarray":
        return obj.tolist()
    if obj_class == "Series":
        return obj.values.tolist()
    if obj_class == "DataFrame":
        return obj.values.tolist()
    if obj_class == "Tensor" and obj_module == "torch":
        return obj.detach().numpy().tolist()
    if tf is not None and isinstance(obj, tf.Tensor):  # if TensorFlow
        try:
            return obj.numpy().tolist()
        except:  # TF 1.X or not-eager execution
            pass
    # strings
    if isinstance(obj, six.string_types):  # prevent infinite loop with iter
        return obj
    if isinstance(obj, six.binary_type):
        return six.ensure_str(obj)
    # dicts and lists
    if isinstance(obj, dict):
        # recurse into both keys and values
        return {to_builtin(key): to_builtin(val) for key, val in six.viewitems(obj)}
    try:
        iter(obj)
    except TypeError:
        pass
    else:
        # any other iterable becomes a list, recursively converted
        return [to_builtin(val) for val in obj]
    return obj
def python_to_val_proto(raw_val, allow_collection=False):
    """
    Converts a Python variable into a `protobuf` `Value` `Message` object.
    Parameters
    ----------
    raw_val
        Python variable.
    allow_collection : bool, default False
        Whether to allow ``list``s and ``dict``s as `val`. This flag exists because some callers
        ought to not support logging collections, so this function will perform the typecheck on `val`.
    Returns
    -------
    google.protobuf.struct_pb2.Value
        `protobuf` `Value` `Message` representing `val`.
    Raises
    ------
    TypeError
        If `val` (after coercion) is an unsupported type, a disallowed collection,
        or a dict with non-string keys.
    """
    # TODO: check `allow_collection` before `to_builtin()` to avoid unnecessary processing
    val = to_builtin(raw_val)
    if val is None:
        return Value(null_value=NULL_VALUE)
    elif isinstance(val, bool):  # did you know that `bool` is a subclass of `int`?
        return Value(bool_value=val)
    elif isinstance(val, numbers.Real):
        return Value(number_value=val)
    elif isinstance(val, six.string_types):
        return Value(string_value=val)
    elif isinstance(val, (list, dict)):
        if allow_collection:
            if isinstance(val, list):
                list_value = ListValue()
                list_value.extend(val)  # pylint: disable=no-member
                return Value(list_value=list_value)
            else:  # isinstance(val, dict)
                # protobuf Structs only accept string keys
                if all([isinstance(key, six.string_types) for key in val.keys()]):
                    struct_value = Struct()
                    struct_value.update(val)  # pylint: disable=no-member
                    return Value(struct_value=struct_value)
                else:  # protobuf's fault
                    raise TypeError("struct keys must be strings; consider using log_artifact() instead")
        else:
            raise TypeError("unsupported type {}; consider using log_attribute() instead".format(type(raw_val)))
    else:
        raise TypeError("unsupported type {}; consider using log_artifact() instead".format(type(raw_val)))
def val_proto_to_python(msg):
    """
    Converts a `protobuf` `Value` `Message` object into a Python variable.
    Parameters
    ----------
    msg : google.protobuf.struct_pb2.Value
        `protobuf` `Value` `Message` representing a variable.
    Returns
    -------
    one of {None, bool, float, int, str}
        Python variable represented by `msg`.
    """
    value_kind = msg.WhichOneof("kind")
    if value_kind == "null_value":
        return None
    elif value_kind == "bool_value":
        return msg.bool_value
    elif value_kind == "number_value":
        # protobuf stores all numbers as doubles; recover ints where lossless
        return int(msg.number_value) if msg.number_value.is_integer() else msg.number_value
    elif value_kind == "string_value":
        return msg.string_value
    elif value_kind == "list_value":
        # recursively convert each element
        return [val_proto_to_python(val_msg)
                for val_msg
                in msg.list_value.values]
    elif value_kind == "struct_value":
        # recursively convert each field value; keys are always strings
        return {key: val_proto_to_python(val_msg)
                for key, val_msg
                in msg.struct_value.fields.items()}
    else:
        raise NotImplementedError("retrieved value type is not supported")
def unravel_key_values(rpt_key_value_msg):
    """
    Converts a repeated KeyValue field of a protobuf message into a dictionary.
    Parameters
    ----------
    rpt_key_value_msg : google.protobuf.pyext._message.RepeatedCompositeContainer
        Repeated KeyValue field of a protobuf message.
    Returns
    -------
    dict of str to {None, bool, float, int, str}
        Names and values.
    """
    result = {}
    for key_value in rpt_key_value_msg:
        result[key_value.key] = val_proto_to_python(key_value.value)
    return result
def unravel_artifacts(rpt_artifact_msg):
    """
    Converts a repeated Artifact field of a protobuf message into a list of names.
    Parameters
    ----------
    rpt_artifact_msg : google.protobuf.pyext._message.RepeatedCompositeContainer
        Repeated Artifact field of a protobuf message.
    Returns
    -------
    list of str
        Names of artifacts.
    """
    names = []
    for artifact in rpt_artifact_msg:
        names.append(artifact.key)
    return names
def unravel_observation(obs_msg):
    """
    Converts an Observation protobuf message into a more straightforward Python tuple.
    This is useful because an Observation message has a oneof that's finicky to handle.
    Returns
    -------
    str
        Name of observation.
    {None, bool, float, int, str}
        Value of observation.
    str
        Human-readable timestamp.
    Raises
    ------
    ValueError
        If the observation's oneof is set to something other than "attribute" or "artifact".
    """
    # hoist the duplicated WhichOneof() call
    which = obs_msg.WhichOneof("oneOf")
    if which == "attribute":
        key = obs_msg.attribute.key
        value = obs_msg.attribute.value
    elif which == "artifact":
        key = obs_msg.artifact.key
        # NOTE(review): this `value` is a plain str, not a Value proto; it is
        # still passed through val_proto_to_python() below as before -- confirm.
        value = "{} artifact".format(_CommonService.ArtifactTypeEnum.ArtifactType.Name(obs_msg.artifact.artifact_type))
    else:
        # BUGFIX: previously fell through with `key`/`value` unbound, raising a
        # confusing NameError; fail explicitly instead.
        raise ValueError("unrecognized observation type: {}".format(which))
    return (
        key,
        val_proto_to_python(value),
        timestamp_to_str(obs_msg.timestamp),
    )
def unravel_observations(rpt_obs_msg):
    """
    Converts a repeated Observation field of a protobuf message into a dictionary.
    Parameters
    ----------
    rpt_obs_msg : google.protobuf.pyext._message.RepeatedCompositeContainer
        Repeated Observation field of a protobuf message.
    Returns
    -------
    dict of str to list of tuples ({None, bool, float, int, str}, str)
        Names and observation sequences.
    """
    observations = {}
    for obs_msg in rpt_obs_msg:
        key, value, timestamp = unravel_observation(obs_msg)
        if key not in observations:
            observations[key] = []
        observations[key].append((value, timestamp))
    return observations
def validate_flat_key(key):
    """
    Checks whether `key` contains invalid characters.

    To prevent bugs with querying (which allow dot-delimited nested keys), flat keys (such as those
    used for individual metrics) must not contain periods. Keys are further restricted to
    alphanumerics, underscores, dashes, and forward slashes for back-end robustness.

    Parameters
    ----------
    key : str
        Name of metadatum.

    Raises
    ------
    ValueError
        If `key` contains invalid characters.

    """
    if any(char not in _VALID_FLAT_KEY_CHARS for char in key):
        raise ValueError("`key` may only contain alphanumeric characters, underscores, dashes,"
                         " and forward slashes")
def generate_default_name():
    """
    Generates a string that can be used as a default entity name while avoiding collisions.

    The generated string concatenates the current process ID with the digits of the current
    Unix timestamp, so a collision requires one process creating two entities within the
    timestamp's precision.

    Returns
    -------
    name : str
        String generated from the current process ID and Unix timestamp.

    """
    pid = os.getpid()
    # strip the decimal point so the timestamp contributes only digits
    timestamp_digits = str(time.time()).replace('.', '')
    return "{}{}".format(pid, timestamp_digits)
class UTC(datetime.tzinfo):
    """Minimal UTC tzinfo for Python 2, which lacks ``datetime.timezone.utc``."""

    _ZERO = datetime.timedelta(0)

    def utcoffset(self, dt):
        # UTC is zero offset from UTC, by definition
        return self._ZERO

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        # UTC never observes daylight saving time
        return self._ZERO
def timestamp_to_ms(timestamp):
    """
    Converts a Unix timestamp into one with millisecond resolution.

    Parameters
    ----------
    timestamp : float or int
        Unix timestamp.

    Returns
    -------
    int
        `timestamp` with millisecond resolution (13 integer digits).

    """
    integer_part = str(timestamp).split('.')[0]
    # scale so the result has exactly 13 integer digits (milliseconds since the epoch)
    scale = 10 ** (13 - len(integer_part))
    return int(timestamp * scale)
def ensure_timestamp(timestamp):
    """
    Converts a representation of a datetime into a Unix timestamp with millisecond resolution.

    If `timestamp` is provided as a string, this function attempts to use pandas (if installed) to
    parse it into a Unix timestamp, since pandas can internally handle many different human-readable
    datetime string representations. If pandas is not installed, this function will only handle an
    ISO 8601 representation.

    Parameters
    ----------
    timestamp : str or float or int or datetime.datetime
        String representation of a datetime, numerical Unix timestamp, or datetime object.

    Returns
    -------
    int
        `timestamp` with millisecond resolution (13 integer digits).

    Raises
    ------
    ValueError
        If `timestamp` is a string but pandas is unavailable or cannot parse it.
    TypeError
        If `timestamp` is none of the supported types.

    """
    if isinstance(timestamp, six.string_types):
        try:  # attempt with pandas, which can parse many time string formats
            return timestamp_to_ms(pd.Timestamp(timestamp).timestamp())
        except NameError:  # pandas not installed; `pd` is undefined at module level
            six.raise_from(ValueError("pandas must be installed to parse datetime strings"),
                           None)
        except ValueError:  # can't be handled by pandas
            six.raise_from(ValueError("unable to parse datetime string \"{}\"".format(timestamp)),
                           None)
    elif isinstance(timestamp, numbers.Real):
        return timestamp_to_ms(timestamp)
    elif isinstance(timestamp, datetime.datetime):
        if six.PY2:
            # replicate https://docs.python.org/3/library/datetime.html#datetime.datetime.timestamp
            # NOTE(review): a naive `timestamp` is treated as UTC here, whereas Python 3's
            # .timestamp() treats naive datetimes as local time — confirm this asymmetry is intended.
            seconds = (timestamp - datetime.datetime(1970, 1, 1, tzinfo=UTC())).total_seconds()
        else:  # Python 3
            seconds = timestamp.timestamp()
        return timestamp_to_ms(seconds)
    else:
        raise TypeError("unable to parse timestamp of type {}".format(type(timestamp)))
def timestamp_to_str(timestamp):
"""
Converts a Unix timestamp into a human-readable string representation.
Parameters
----------
timestamp : int
Numerical Unix timestamp.
Returns
-------
str
Human-readable string representation of `timestamp`.
"""
num_digits = len(str(timestamp))
return str(datetime.datetime.fromtimestamp(timestamp*10**(10 - num_digits)))
def now():
    """
    Returns the current Unix timestamp with millisecond resolution.

    Returns
    -------
    now : int
        Current Unix timestamp in milliseconds.

    """
    current_seconds = time.time()
    return timestamp_to_ms(current_seconds)
def get_python_version():
    """
    Returns the version number of the locally-installed Python interpreter.

    Returns
    -------
    str
        Python version number in the form "{major}.{minor}.{patch}".

    """
    major, minor, patch = sys.version_info[:3]
    return "{}.{}.{}".format(major, minor, patch)
def save_notebook(notebook_path=None, timeout=5):
    """
    Saves the current notebook on disk and returns its contents after the file has been rewritten.

    Parameters
    ----------
    notebook_path : str, optional
        Filepath of the Jupyter Notebook. Defaults to auto-detection via
        `get_notebook_filepath()`.
    timeout : float, default 5
        Maximum number of seconds to wait for the notebook to save.

    Returns
    -------
    notebook_contents : file-like
        An in-memory copy of the notebook's contents at the time this function returns. This can
        be ignored, but is nonetheless available to minimize the risk of a race condition caused by
        delaying the read until a later time.

    Raises
    ------
    OSError
        If the notebook is not saved within `timeout` seconds.

    """
    if notebook_path is None:
        notebook_path = get_notebook_filepath()
    # record the file's current mtime so we can detect when Jupyter rewrites it
    modtime = os.path.getmtime(notebook_path)
    # NOTE(review): `display`/`Javascript` presumably come from IPython.display — this
    # trigger only works inside a running notebook front end.
    display(Javascript('''
        require(["base/js/namespace"],function(Jupyter) {
            Jupyter.notebook.save_checkpoint();
        });
    '''))
    # wait for file to be modified (poll mtime every 10 ms)
    start_time = time.time()
    while time.time() - start_time < timeout:
        new_modtime = os.path.getmtime(notebook_path)
        if new_modtime > modtime:
            break
        time.sleep(0.01)
    else:  # loop exhausted without seeing a newer mtime
        raise OSError("unable to save notebook")
    # wait for file to be rewritten (mtime can update before contents are flushed)
    timeout -= (time.time() - start_time)  # remaining time
    start_time = time.time()
    while time.time() - start_time < timeout:
        with open(notebook_path, 'r') as f:
            contents = f.read()
        if contents:
            return six.StringIO(contents)
        time.sleep(0.01)
    else:  # loop exhausted; file stayed empty
        raise OSError("unable to read saved notebook")
def get_notebook_filepath():
    """
    Returns the filesystem path of the Jupyter notebook running the Client.

    This implementation is from https://github.com/jupyter/notebook/issues/1000#issuecomment-359875246.

    Returns
    -------
    str

    Raises
    ------
    OSError
        If one of the following is true:
            - Jupyter is not installed
            - Client is not being called from a notebook
            - the calling notebook cannot be identified

    """
    try:
        connection_file = ipykernel.connect.get_connection_file()
    except (NameError,  # Jupyter not installed (ipykernel never imported)
            RuntimeError):  # not in a Notebook
        pass
    else:
        # the connection file is named kernel-<id>.json; extract the kernel id
        kernel_id = re.search('kernel-(.*).json', connection_file).group(1)
        # ask each running notebook server which sessions it hosts, and match our kernel
        for server in list_running_servers():
            response = requests.get(urljoin(server['url'], 'api/sessions'),
                                    params={'token': server.get('token', '')})
            if response.ok:
                for session in response.json():
                    if session['kernel']['id'] == kernel_id:
                        relative_path = session['notebook']['path']
                        return os.path.join(server['notebook_dir'], relative_path)
    raise OSError("unable to find notebook file")
def get_script_filepath():
    """
    Returns the filesystem path of the Python script running the Client.

    This function iterates back through the call stack until it finds a non-Verta stack frame and
    returns its filepath.

    Returns
    -------
    str

    Raises
    ------
    OSError
        If the calling script cannot be identified.

    """
    for frame_record in inspect.stack():
        frame_module = inspect.getmodule(frame_record[0])
        if frame_module is not None and frame_module.__name__.split('.', 1)[0] == "verta":
            continue  # still inside the verta package; keep walking up the stack
        candidate = frame_record[1]
        if os.path.exists(candidate):
            return candidate
        # e.g. Jupyter fakes the filename for cells; continuing might return a built-in
        break
    raise OSError("unable to find script file")
def is_org(workspace_name, conn):
    """
    Checks whether `workspace_name` names an organization on the back end.

    Parameters
    ----------
    workspace_name : str
        Name of a workspace (user or organization).
    conn : object
        Connection info; must expose `scheme` and `socket` attributes and be accepted
        by `make_request`.

    Returns
    -------
    bool
        True if the organization lookup did not return HTTP 404.

    """
    response = make_request(
        "GET",
        "{}://{}/api/v1/uac-proxy/organization/getOrganizationByName".format(conn.scheme, conn.socket),
        conn, params={'org_name': workspace_name},
    )
    # NOTE(review): any non-404 status (including 5xx errors) counts as "is an
    # organization" — confirm that's intended.
    return response.status_code != 404
|
mit
|
ClonedOne/PythonScripts
|
CategoricalScatterPlot.py
|
1
|
6646
|
import csv, sys, pprint
import random as rnd
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from itertools import combinations
from matplotlib import cm
def acquire_data(file_name):
    """Read a CSV file into a dict mapping each column header to its list of string values."""
    with open(file_name) as csvfile:
        reader = csv.DictReader(csvfile)
        # one (initially empty) value list per column
        columns = {name: [] for name in reader.fieldnames}
        for record in reader:
            for name in columns:
                columns[name].append(record[name])
    return columns
def acquire_categories(file_name):
    """Read the category-definition CSV: each header maps to its ordered list of values."""
    with open(file_name) as csvfile:
        reader = csv.DictReader(csvfile)
        fields = reader.fieldnames
        categories = dict((field, []) for field in fields)
        for row in reader:
            for field in fields:
                categories[field].append(row[field])
    return categories
def acquire_labels(file_name):
    """Read one label per line from a text file, stripping surrounding whitespace."""
    with open(file_name) as label_file:
        return [line.strip() for line in label_file]
def acquire_stats(data_dict, categories):
    """
    Count value occurrences for every categorical column.

    Returns a dict mapping each category name to {value: occurrence count}, considering
    only columns of `data_dict` that are declared in `categories`.
    """
    stat_dict = {cat: {} for cat in categories}
    for cat, values in data_dict.items():
        if cat not in stat_dict:
            continue  # non-categorical column (e.g. an identifier)
        counts = stat_dict[cat]
        for value in values:
            counts[value] = counts.get(value, 0) + 1
    return stat_dict
def compute_points_dataframe(data_dict, categories, fuzzy=True):
    """
    Build a DataFrame of numeric plot coordinates from categorical data.

    Each categorical column is mapped to the index of its value in `categories`;
    with `fuzzy=True` a small uniform jitter in [-0.1, 0.1] is added so coincident
    points don't overlap exactly. The (last) non-categorical column is used as the
    DataFrame index.

    Bug fixes vs. the original: `data_dict.keys()` was indexed positionally
    (`data_headers[0]`), which raises TypeError on Python 3; and `identifier` was
    unbound (NameError) when every column is categorical — now falls back to a
    default integer index.
    """
    cat_list = list(categories.keys())
    points = {category: [] for category in cat_list}
    identifier = None
    for header in data_dict:
        if header not in cat_list:
            identifier = header  # last non-category column wins, as before
    # all columns have the same length; use any one of them
    number_of_points = len(next(iter(data_dict.values())))
    for i in range(number_of_points):
        for category in cat_list:
            position = categories[category].index(data_dict[category][i])
            if fuzzy:
                position += rnd.uniform(-0.1, 0.1)
            points[category].append(position)
    index = data_dict[identifier] if identifier is not None else None
    d = {category: pd.Series(points[category], index=index)
         for category in cat_list}
    return pd.DataFrame(d)
def graph_single_point(df, cat1, cat2, categories, label_list=None):
    """
    Scatter-plot the rows of `df` on the two categorical axes `cat1` vs `cat2`.

    Tick positions are category indices; tick labels are the category value names.
    Rows whose index appears in `label_list` are annotated with an arrow at a random
    offset so overlapping labels spread out. Blocks on plt.show().
    """
    color_map = cm.get_cmap('winter')
    cat_length_1 = len(categories[cat1])
    cat_length_2 = len(categories[cat2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # color points by their cat1 coordinate
    df.plot(cat1, cat2, kind='scatter', marker='o', ax=ax, s=65, c=df[cat1], linewidth=0, cmap=color_map)
    plt.xticks(np.arange(cat_length_1 + 1), categories[cat1], fontsize=14)
    plt.yticks(np.arange(cat_length_2 + 1), categories[cat2], fontsize=14)
    for item in [ax.title, ax.xaxis.label, ax.yaxis.label]:
        item.set_fontsize(14)
    if label_list is not None:
        for k, v in df.iterrows():
            if k in label_list:
                # random text offset (in points) to reduce label collisions
                ax.annotate(k, (v[cat1], v[cat2]), xytext=(rnd.randint(-50, 50), rnd.randint(-60, 60)),
                            textcoords='offset points',
                            family='sans-serif', fontsize=16, ha='center', va='bottom', color='darkslategrey',
                            arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=0'))
    plt.show()
def graph_points_bubble(data_frame, cat1, cat2, categories):
    """
    Scatter-plot `cat1` vs `cat2` with marker area proportional (quadratically) to
    how many rows share the same (cat1, cat2) point; see count_point_occurrences.
    Blocks on plt.show().
    """
    point_occurrences = count_point_occurrences(data_frame, cat1, cat2)
    cmap = cm.get_cmap('winter')
    cat_length_1 = len(categories[cat1])
    cat_length_2 = len(categories[cat2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.xticks(np.arange(cat_length_1 + 1), categories[cat1], fontsize=14)
    plt.yticks(np.arange(cat_length_2 + 1), categories[cat2], fontsize=14)
    # s= gives one size per point; color again tracks the cat1 coordinate
    data_frame.plot(cat1, cat2, kind='scatter', marker='o', ax=ax, s=point_occurrences, c=data_frame[cat1], linewidth=0, cmap=cmap)
    for item in [ax.title, ax.xaxis.label, ax.yaxis.label]:
        item.set_fontsize(16)
    plt.show()
def graph_stats_bubble(stat_dict, categories):
    """
    Bubble-plot the per-value counts for one category: x = category values,
    y = counts, with bubble area growing quadratically with the count.

    `stat_dict` maps value -> count; `categories` is the ordered list of values
    (values missing from `stat_dict` are plotted with count 0). Blocks on plt.show().
    """
    cmap = cm.get_cmap('brg')
    x = []
    lables_x = []  # (sic) x-axis tick labels; name kept as-is
    y = []
    i = 0
    for cat in categories:
        x.append(i)
        lables_x.append(cat)
        i += 1
        if cat in stat_dict:
            y.append(stat_dict[cat])
        else:
            y.append(0)
    # quadratic scaling exaggerates differences between counts
    s = [30 * (elem ** 2) for elem in y]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    plt.xticks(np.arange(len(lables_x)), lables_x, fontsize=14)
    for item in [ax.title, ax.xaxis.label, ax.yaxis.label]:
        item.set_fontsize(14)
    plt.scatter(x, y, s=s, c=s, cmap=cmap)
    plt.show()
def count_point_occurrences(df, cat1, cat2):
    """
    For every row of `df`, return a marker size based on how many rows share the
    same (cat1, cat2) point: size = (occurrence count)^2 * 100.
    """
    xs = df[cat1]
    ys = df[cat2]
    pairs = [(xs[i], ys[i]) for i in range(len(xs))]
    occurs = {}
    for point in pairs:
        occurs[point] = occurs.get(point, 0) + 1
    # quadratic scaling so crowded points stand out
    return [(occurs[point] ** 2) * 100 for point in pairs]
def main():
    """
    Entry point: load the data CSV (argv[1]), category CSV (argv[2]) and optional
    label file (argv[3]), then draw bubble plots for every pair of categories.
    """
    debug = True
    if len(sys.argv) < 3:
        # parenthesized single-argument print is valid on both Python 2 and 3
        # (the original print statements were Python-2-only syntax)
        print("Please provide a valid file path for the data file and the category file [optional label file path]")
        return
    label_list = []
    data_dict = acquire_data(sys.argv[1])
    categories = acquire_categories(sys.argv[2])
    if len(sys.argv) == 4:
        label_list = acquire_labels(sys.argv[3])
    if debug:
        pprint.pprint(data_dict)
        for key in data_dict:
            print(len(data_dict[key]))
        pprint.pprint(categories)
        for key in categories:
            print(len(categories[key]))
    df = compute_points_dataframe(data_dict, categories)
    stat_dict = acquire_stats(data_dict, categories)
    df_not_fuzzy = compute_points_dataframe(data_dict, categories, fuzzy=False)
    if debug:
        print(stat_dict)
        print(df)
        print(df_not_fuzzy)
    '''
    for key in stat_dict.keys():
        graph_stats_bubble(stat_dict[key], categories[key])
    '''
    '''
    for comb in combinations(categories.keys(), 2):
        if len(label_list) > 0:
            graph_single_point(df, comb[0], comb[1], categories, label_list)
        else:
            graph_single_point(df, comb[0], comb[1], categories)
    '''
    for comb in combinations(categories.keys(), 2):
        graph_points_bubble(df_not_fuzzy, comb[0], comb[1], categories)


if __name__ == '__main__':
    main()
|
mit
|
dallascard/guac
|
core/models/ecc.py
|
1
|
1531
|
import numpy as np
import pandas as pd
from scipy import sparse
from scipy import stats
from classifier_chain import ClassifierChain
class ECC:
    """
    Ensemble of Classifier Chains for multi-label classification.

    Trains `n_chains` independent ClassifierChain models on the same data and
    predicts by taking the per-label majority vote (mode) across the ensemble.
    """

    # class-level default; always shadowed by the instance attribute set in __init__
    chains = None

    def __init__(self, model_type, codes, feature_names=None, alphas=None, n_chains=5, **kwargs):
        # build n_chains identically-configured chains
        self.chains = []
        for i in range(n_chains):
            chain = ClassifierChain(model_type, codes, feature_names, alphas, **kwargs)
            self.chains.append(chain)

    def fit(self, orig_X, all_y):
        # fit every chain on the same training data
        for i, chain in enumerate(self.chains):
            chain.fit(orig_X, all_y)

    def tune_by_cv(self, orig_X, all_y, alpha_values, td_splits, n_dev_folds, reuser=None, verbose=1):
        # tune each chain's regularization by cross-validation
        # NOTE(review): Python 2 print statement; `best_alphas` is overwritten each
        # iteration and never used or returned — confirm that's intentional.
        for i, chain in enumerate(self.chains):
            print "Tuning chain ", i
            best_alphas = chain.tune_by_cv(orig_X, all_y, alpha_values, td_splits, n_dev_folds, reuser, verbose)

    def predict(self, orig_X, index, codes):
        # collect one (n_instances x n_codes) prediction DataFrame per chain
        predictions = []
        for i, chain in enumerate(self.chains):
            predictions.append(chain.predict(orig_X, index, codes))
        prediction_arrays = []
        for df in predictions:
            prediction_arrays.append(np.reshape(df.values, [len(index), len(codes), 1]))
        # take the most common prediction across the ensemble
        final_stack = np.concatenate(prediction_arrays, axis=2)
        final = stats.mode(final_stack, axis=2)[0][:, :, 0]
        final_df = pd.DataFrame(final, index=index, columns=codes)
        return final_df

    def save_models(self):
        # not implemented; persistence is handled elsewhere (if at all)
        pass
|
apache-2.0
|
QuLogic/iris
|
lib/iris/tests/unit/plot/test_points.py
|
11
|
3049
|
# (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.points` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
    """Checks that iris.plot.points handles a string-valued coordinate on either axis."""

    def test_yaxis_labels(self):
        iplt.points(self.cube, coords=('bar', 'str_coord'))
        self.assertBoundsTickLabels('yaxis')

    def test_xaxis_labels(self):
        iplt.points(self.cube, coords=('str_coord', 'bar'))
        self.assertBoundsTickLabels('xaxis')

    def test_xaxis_labels_with_axes(self):
        # same as above but drawing onto an explicitly supplied Axes
        import matplotlib.pyplot as plt
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_xlim(0, 3)
        iplt.points(self.cube, coords=('str_coord', 'bar'), axes=ax)
        plt.close(fig)
        self.assertPointsTickLabels('xaxis', ax)

    def test_yaxis_labels_with_axes(self):
        import matplotlib.pyplot as plt
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_ylim(0, 3)
        iplt.points(self.cube, coords=('bar', 'str_coord'), axes=ax)
        plt.close(fig)
        self.assertPointsTickLabels('yaxis', ax)

    def test_geoaxes_exception(self):
        # a plain (non-geo) Axes must be rejected for a lat/lon cube
        import matplotlib.pyplot as plt
        fig = plt.figure()
        ax = fig.add_subplot(111)
        self.assertRaises(TypeError, iplt.points,
                          self.lat_lon_cube, axes=ax)
        plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
    """Runs the shared MixinCoords checks with iplt.points as the draw function."""

    def setUp(self):
        # We have a 2d cube with dimensionality (bar: 3; foo: 4)
        self.cube = simple_2d(with_bounds=False)
        self.foo = self.cube.coord('foo').points
        self.foo_index = np.arange(self.foo.size)
        self.bar = self.cube.coord('bar').points
        self.bar_index = np.arange(self.bar.size)
        # points() draws no data payload, so expected data is None
        self.data = None
        self.dataT = None
        # intercept the underlying scatter call so MixinCoords can inspect its args
        self.mpl_patch = self.patch('matplotlib.pyplot.scatter')
        self.draw_func = iplt.points


if __name__ == "__main__":
    tests.main()
|
gpl-3.0
|
tkcroat/Augerquant
|
Auger_import_workflow.py
|
1
|
6225
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
Auger_batch_header
Designed to read out pertinent header information from all Auger files within a folder.
Output into single log file for import into Excel or elsewhere
"""
#%% Load modules
import os, glob,sys # already run with functions
import pandas as pd
#import re, struct (only used in sub-functions)
# import csv, fileinput
if 'C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Modules' not in sys.path:
sys.path.append('C:\\Users\\tkc\\Documents\\Python_Scripts\\Augerquant\\Modules')
import Auger_batch_import_functions as Auger
import Auger_utility_functions as AESutils
import Auger_quantmap_functions as QM
#%% Set data directory with Auger data files (via cd in Ipython console, using tinker or using os.chdir)
# NOTE(review): this is an interactive (Spyder-cell) scratch workflow, not a linear
# script — cells are meant to be run selectively, and several lines are alternatives.
os.chdir('C:\\Temp\\AugerQM\\28Sep17')
# from tkinter import filedialog
# datapath = filedialog.askdirectory(initialdir="H:\\Research_data", title = "choose data directory")
#%% Load list of all Auger data files (images, spectral maps and spectra)
filelist=glob.glob('*.sem')+glob.glob('*.map')+glob.glob('*.spe')
# NOTE(review): the next three assignments each overwrite filelist — run only the one you want
filelist=glob.glob('*.map')
filelist=glob.glob('*.sem')
filelist=glob.glob('*.spe')
# Find and read Auger logbook (xls file that contains phrase "Auger_logbook")
# this file is to associate sample/project names with Auger file numbers and can be used to combine/average multiple spe files
Augerlogbook=Auger.openorcreatelogbook(filelist)
# Check log file for consistency between xls log and data files in directory
# any errors output to iconsole... i.e. combinable files must all be spe, no missing entries in logbook or missing data files
Auger.checklogfile(filelist, Augerlogbook)
#%% Main file processing loop for spe, sem and map files (header param extraction/ create csv from binaries)
# If file in Augerparamlog.csv it won't be reprocessed.. for reprocessing delete filenumber from that log or delete entire log
kwargs={}
kwargs={'move':False} # option to not move files to /sub
AugerParamLog = Auger.Augerbatchimport(filelist, Augerlogbook, **kwargs) # Extract params and process SEM, SPE and MAP files
# NOTE(review): unprefixed duplicate of the line above — NameError unless Augerbatchimport
# was imported directly into this namespace
AugerParamLog = Augerbatchimport(filelist, Augerlogbook)
# augerparam log is not auto-saved ...
# log files are overwritten if they exist by default (but data binaries are not)
AugerParamLog.to_csv('Augerparamlog.csv',index=False) # save all params to new more complete log file (not autosaved)
spelist=AugerParamLog[(AugerParamLog['Areas']>=1)] # remove image & map files
# Create jpg images annotated with spatial areas for spe files (assumes existence of .sem image taken just before .spe file)
# easiest to do this before moving combined files (into separate log)
SpatialAreas=pd.read_csv('spatialareaslog.csv') # open automatically created spatial areas log for spe files
Auger.makeannotatedjpg(AugerParamLog, SpatialAreas) # makes annotated jpgs for all spe files with prior sem image
# Manually create single annotated image with spatial areas for selected jpg file and spe file
Auger.annotateone('Acfer094.122.jpg','Acfer094.124.csv', SpatialAreas)
# Determine stage drift/pixel shift from pre and post images by filenumber
shift, error = Auger.findshift(127, 129, AugerParamLog)
# Combine quantmap spe files (renumbers areas and moves all areas to single combined file with filenumber firstlast.csv)
AugerParamLog=QM.combineQMdata(AugerParamLog,'221-222',QMname='')
# Renumber/rename spatial areas for quant map files
SpatialAreas=pd.read_csv('spatialareaslog.csv')
SpatialAreas=QM.renumberQMareas(SpatialAreas, '221-222',QMname='') # copy spatial areas for combined QM file, autosaved
# log files are overwritten if they exist by default (but data binaries are not)
AugerParamLog.to_csv('Augerparamlog.csv',index=False) # save all params to new more complete log file (not autosaved)
#%% Section to average and combine multiple data passes
''' If starting from here (combine multiple spe), just cd to data directory, reload "Auger_logbook" above, and then load AugerParamLog
AugerParamLog2=pd.read_csv('Augerparamlog.csv', encoding='cp437')
AugerParamLogsubs=pd.read_csv('sub\\Augerparamlog_subs.csv', encoding='cp437')
AugerParamLog=pd.concat([AugerParamLog,AugerParamLogsubs])
AugerParamLog=AugerParamLog.drop_duplicates(['Filenumber'])
'''
#AVERAGE-COMBINE METHOD 1 (with unique filenumbers for entire project and combinations defined in xls logbook
combinelist=Augerlogbook[(Augerlogbook['Lastnumber']>0)] # gets file ranges to combine via averaging
# Combine consecutive files via averaging, move underlying files to /sub
AugerParamLog=Auger.combinespeloop(combinelist, AugerParamLog, movefiles=False)
AugerParamLog.to_csv('Augerparamlog.csv',index=False) # save all params to new more complete log file
# AVERAGE-COMBINE METHOD 2: Search for files with identical basename, X and Y, combine via averaging, save, create log entry
AugerParamLog=Auger.autocombinespe(AugerParamLog)
# NOTE(review): unprefixed duplicate — NameError unless autocombinespe was imported directly
AugerParamLog=autocombinespe(AugerParamLog)
# Combine via averaging directly from list of files (different entire spectra with same defined areas)
fullfilelist=glob.glob('*.csv') # Select based on any naming rule (or use any other means to get list of csv filenames to combine)
filelist=fullfilelist[37:40]
# TODO select list of files with tkinter
AugerParamLog=Auger.combinespelist(filelist, AugerParamLog, csvname='', movefiles=False) # if csvname blank, autonamed based on file#s
# TODO average multiple areas (select by number) within single file
# Check csv files against AugerParamLog
AugerParamLog=AESutils.checkparamlog(AugerParamLog, makeentry=False)
# Assembling larger parameter log files from subdirectories
paramloglist=glob.glob('**/Augerparamlog.csv', recursive=True)
integloglist=glob.glob('**/Integquantlog.csv', recursive=True)
smdifloglist=glob.glob('**/Smdifpeakslog.csv', recursive=True)
Masterparamlog, Masterinteglog, Mastersmdiflog=AESutils.assembledataset(paramloglist, integloglist, smdifloglist)
# NOTE(review): unprefixed duplicate — NameError unless assembledataset was imported directly
Masterparamlog, Masterinteglog, Mastersmdiflog=assembledataset(paramloglist, integloglist, smdifloglist)
Masterparamlog.to_csv('Augerparamlog.csv', index=False)
Masterinteglog.to_csv('Integquantlog.csv', index=False)
Mastersmdiflog.to_csv('Smdifpeakslog.csv', index=False)
|
mit
|
lazywei/scikit-learn
|
examples/linear_model/plot_sgd_loss_functions.py
|
249
|
1095
|
"""
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
    """
    Elementwise modified Huber loss of the margin z = y_pred * y_true:
    -4z for z < -1, (1 - z)^2 for -1 <= z < 1, and 0 for z >= 1.
    """
    margin = y_pred * y_true
    return np.where(margin >= 1.0, 0.0,
                    np.where(margin >= -1.0, (1.0 - margin) ** 2, -4.0 * margin))
# Evaluate each loss over a grid of decision-function values f(x), assuming y = 1.
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
# Zero-one loss: a step at f(x) = 0, drawn from its corner points.
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
         label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
         label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
         label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
         label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
         label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
         label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
|
bsd-3-clause
|
pompiduskus/scikit-learn
|
sklearn/linear_model/tests/test_randomized_l1.py
|
214
|
4690
|
# Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
# Shared fixture: standardized diabetes data restricted to five columns,
# used by all tests below.
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# univariate F-scores serve as the ground-truth feature ranking in the tests
F, _ = f_regression(X, y)
def test_lasso_stability_path():
    # Check lasso stability path
    # Load diabetes data and add noisy features
    scaling = 0.3
    coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
                                                  random_state=42,
                                                  n_resampling=30)
    # the 3 features with the highest univariate F-scores should also have the
    # highest cumulative stability scores
    assert_array_equal(np.argsort(F)[-3:],
                       np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
    # Check randomized lasso
    scaling = 0.3
    selection_threshold = 0.5

    # or with 1 alpha
    clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
                          scaling=scaling,
                          selection_threshold=selection_threshold)
    feature_scores = clf.fit(X, y).scores_
    # top-3 features by stability score should match the top-3 by F-score
    assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])

    # or with many alphas
    clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
                          scaling=scaling,
                          selection_threshold=selection_threshold)
    feature_scores = clf.fit(X, y).scores_
    assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
    assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])

    # transform keeps only features above the threshold; inverse restores the shape
    X_r = clf.transform(X)
    X_full = clf.inverse_transform(X_r)
    assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
    assert_equal(X_full.shape, X.shape)

    # data-driven alpha ('aic') on this fixture selects every feature
    clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
                          scaling=scaling)
    feature_scores = clf.fit(X, y).scores_
    assert_array_equal(feature_scores, X.shape[1] * [1.])

    # scaling must lie in (0, 1): out-of-range values raise at fit time
    clf = RandomizedLasso(verbose=False, scaling=-0.1)
    assert_raises(ValueError, clf.fit, X, y)
    clf = RandomizedLasso(verbose=False, scaling=1.1)
    assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
    # Check randomized sparse logistic regression
    iris = load_iris()
    X = iris.data[:, [0, 2]]
    y = iris.target
    # restrict to a binary problem (classes 0 and 1)
    X = X[y != 2]
    y = y[y != 2]
    F, _ = f_classif(X, y)

    scaling = 0.3
    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    X_orig = X.copy()
    feature_scores = clf.fit(X, y).scores_
    assert_array_equal(X, X_orig)   # fit does not modify X
    # stability ranking should agree with the univariate F-score ranking
    assert_array_equal(np.argsort(F), np.argsort(feature_scores))

    # same check with a grid of C values
    clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
                                       random_state=42, scaling=scaling,
                                       n_resampling=50, tol=1e-3)
    feature_scores = clf.fit(X, y).scores_
    assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
    # Check randomized sparse logistic regression on sparse data
    iris = load_iris()
    X = iris.data[:, [0, 2]]
    y = iris.target
    # restrict to a binary problem (classes 0 and 1)
    X = X[y != 2]
    y = y[y != 2]

    # center here because sparse matrices are usually not centered
    X, y, _, _, _ = center_data(X, y, True, True)

    X_sp = sparse.csr_matrix(X)

    F, _ = f_classif(X, y)

    scaling = 0.3
    # fit once on the dense matrix ...
    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    feature_scores = clf.fit(X, y).scores_
    # ... and once on the sparse equivalent; scores must be identical
    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    feature_scores_sp = clf.fit(X_sp, y).scores_
    assert_array_equal(feature_scores, feature_scores_sp)
|
bsd-3-clause
|
sagemathinc/cocalc
|
src/smc_sagews/smc_sagews/graphics.py
|
2
|
20982
|
###############################################################################
#
# CoCalc: Collaborative Calculation
#
# Copyright (C) 2016, Sagemath Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import absolute_import
import json, math
from . import sage_salvus
from uuid import uuid4
def uuid():
    """Return a fresh random (version 4) UUID as a string.

    NOTE: this function shadows the stdlib ``uuid`` module name within this module.
    """
    return "{}".format(uuid4())
def json_float(t):
    """Coerce `t` to a JSON-safe float: None passes through, and NaN/inf become None
    (JSON has no representation for them)."""
    if t is None:
        return None
    value = float(t)
    return None if math.isnan(value) or math.isinf(value) else value
#######################################################
# Three.js based plotting
#######################################################
import sage.plot.plot3d.index_face_set
import sage.plot.plot3d.shapes
import sage.plot.plot3d.base
import sage.plot.plot3d.shapes2
from sage.structure.element import Element
def jsonable(x):
    """
    Recursively convert Sage `Element` values into JSON-safe floats (via json_float),
    mapping lists/tuples elementwise; anything else is returned unchanged.
    """
    if isinstance(x, Element):
        return json_float(x)
    elif isinstance(x, (list, tuple)):
        # tuples are converted to lists, matching what JSON can represent
        return [jsonable(y) for y in x]
    return x
def graphics3d_to_jsonable(p):
obj_list = []
def parse_obj(obj):
material_name = ''
faces = []
for item in obj.split("\n"):
tmp = str(item.strip())
if not tmp:
continue
k = tmp.split()
if k[0] == "usemtl": # material name
material_name = k[1]
elif k[0] == 'f': # face
v = [int(a) for a in k[1:]]
faces.append(v)
# other types are parse elsewhere in a different pass.
return [{"material_name": material_name, "faces": faces}]
def parse_texture(p):
texture_dict = []
textures = p.texture_set()
for item in range(0, len(textures)):
texture_pop = textures.pop()
string = str(texture_pop)
item = string.split("(")[1]
name = item.split(",")[0]
color = texture_pop.color
tmp_dict = {"name": name, "color": color}
texture_dict.append(tmp_dict)
return texture_dict
def get_color(name, texture_set):
for item in range(0, len(texture_set)):
if (texture_set[item]["name"] == name):
color = texture_set[item]["color"]
color_list = [color[0], color[1], color[2]]
break
else:
color_list = []
return color_list
def parse_mtl(p):
    """Parse the MTL (material) text of plot object ``p`` into a list of dicts.

    Scans ``p.mtl_str()`` line by line, accumulating Ka/Ks/Kd/illum/Ns/d
    values for the current material.  Each time a new ``newmtl`` line is
    seen, the previously accumulated material is flushed into
    ``all_material``; the final material is flushed once more after the
    loop.  Returns a list of dicts with keys: name, ambient, specular,
    diffuse, illum, shininess, opacity and color.

    NOTE(review): the control flow deliberately relies on caught
    ``UnboundLocalError``: ``name`` and the accumulator lists are unbound
    until the first ``newmtl`` line, so the "flush previous material" block
    fails harmlessly on the first iteration.
    """
    mtl = p.mtl_str()
    all_material = []
    for item in mtl.split("\n"):
        if "newmtl" in item:
            tmp = str(item.strip())
            tmp_list = []
            try:
                texture_set = parse_texture(p)
                color = get_color(name, texture_set)
            except (ValueError, UnboundLocalError):
                # ``name`` is unbound before the first material; nothing to
                # look up yet.
                pass
            try:
                # Flush the previous material (fails with UnboundLocalError
                # on the very first ``newmtl`` line, which is intended).
                tmp_list = {
                    "name": name,
                    "ambient": ambient,
                    "specular": specular,
                    "diffuse": diffuse,
                    "illum": illum_list[0],
                    "shininess": shininess_list[0],
                    "opacity": opacity_diffuse[3],
                    "color": color
                }
                all_material.append(tmp_list)
            except (ValueError, UnboundLocalError):
                pass
            # Reset accumulators for the material that starts on this line.
            ambient = []
            specular = []
            diffuse = []
            illum_list = []
            shininess_list = []
            opacity_diffuse = []
            tmp_list = []
            name = tmp.split()[1]
        # The checks below are substring tests, not keyword matches: any
        # numeric token on a matching line is appended; non-numeric tokens
        # (the keyword itself) are skipped via ValueError.
        if "Ka" in item:
            tmp = str(item.strip())
            for t in tmp.split():
                try:
                    ambient.append(json_float(t))
                except ValueError:
                    pass
        if "Ks" in item:
            tmp = str(item.strip())
            for t in tmp.split():
                try:
                    specular.append(json_float(t))
                except ValueError:
                    pass
        if "Kd" in item:
            tmp = str(item.strip())
            for t in tmp.split():
                try:
                    diffuse.append(json_float(t))
                except ValueError:
                    pass
        if "illum" in item:
            tmp = str(item.strip())
            for t in tmp.split():
                try:
                    illum_list.append(json_float(t))
                except ValueError:
                    pass
        if "Ns" in item:
            tmp = str(item.strip())
            for t in tmp.split():
                try:
                    shininess_list.append(json_float(t))
                except ValueError:
                    pass
        # NOTE(review): "d" also matches the "Kd" line above, so
        # opacity_diffuse first collects the three diffuse floats; the
        # opacity read below at index 3 is then the value from the
        # standalone "d <val>" line.  Looks intentional but fragile --
        # confirm against the .mtl output format actually produced.
        if "d" in item:
            tmp = str(item.strip())
            for t in tmp.split():
                try:
                    opacity_diffuse.append(json_float(t))
                except ValueError:
                    pass
    try:
        color = list(p.all[0].texture.color.rgb())
    except (ValueError, AttributeError):
        pass
    try:
        texture_set = parse_texture(p)
        color = get_color(name, texture_set)
    except (ValueError, AttributeError):
        color = []
    # Flush the last material seen in the file.
    tmp_list = {
        "name": name,
        "ambient": ambient,
        "specular": specular,
        "diffuse": diffuse,
        "illum": illum_list[0],
        "shininess": shininess_list[0],
        "opacity": opacity_diffuse[3],
        "color": color
    }
    all_material.append(tmp_list)
    return all_material
#####################################
# Conversion functions
#####################################
def convert_index_face_set(p, T, extra_kwds):
    """Convert an IndexFaceSet (or generic PrimitiveObject) to a JSON-able
    dict and append it to the enclosing ``obj_list``.

    Args:
        p: sage 3-d plot object providing ``.obj()`` / ``.mtl_str()``.
        T: optional transformation; applied to ``p`` before conversion.
        extra_kwds: unused here -- per-object extras are read from
            ``p._extra_kwds`` instead.
    """
    if T is not None:
        p = p.transform(T=T)
    face_geometry = parse_obj(p.obj())
    # Objects carrying per-face colors are handled by the specialized
    # converter (which appends to obj_list itself).
    if hasattr(p, 'has_local_colors') and p.has_local_colors():
        convert_index_face_set_with_colors(p, T, extra_kwds)
        return
    material = parse_mtl(p)
    vertex_geometry = []
    obj = p.obj()
    for item in obj.split("\n"):
        # NOTE(review): substring test -- this matches ANY line containing
        # the letter "v" (e.g. "vt"/"vn" lines too), relying on json_float's
        # ValueError to skip the non-numeric keyword tokens.  Presumably the
        # .obj text produced by sage only contains plain "v" lines -- confirm.
        if "v" in item:
            tmp = str(item.strip())
            for t in tmp.split():
                try:
                    vertex_geometry.append(json_float(t))
                except ValueError:
                    pass
    myobj = {
        "face_geometry": face_geometry,
        "type": 'index_face_set',
        "vertex_geometry": vertex_geometry,
        "material": material,
        "has_local_colors": 0
    }
    # Forward optional display hints if present on the object.
    for e in ['wireframe', 'mesh']:
        if p._extra_kwds is not None:
            v = p._extra_kwds.get(e, None)
            if v is not None:
                myobj[e] = jsonable(v)
    obj_list.append(myobj)
def convert_index_face_set_with_colors(p, T, extra_kwds):
    """Convert an IndexFaceSet carrying per-face colors to a JSON-able dict
    and append it to the enclosing ``obj_list``.

    Faces are emitted as 1-based vertex index lists with the face color
    appended as the last element.
    """
    faces = []
    for f in p.index_faces_with_colors():
        faces.append([int(v) + 1 for v in f[0]] + [f[1]])
    face_geometry = [{"material_name": p.texture.id, "faces": faces}]
    material = parse_mtl(p)
    vertex_geometry = []
    for vertex in p.vertices():
        for coord in vertex:
            vertex_geometry.append(json_float(coord))
    myobj = {
        "face_geometry": face_geometry,
        "type": 'index_face_set',
        "vertex_geometry": vertex_geometry,
        "material": material,
        "has_local_colors": 1
    }
    # Forward optional display hints if present on the object.
    if p._extra_kwds is not None:
        for key in ('wireframe', 'mesh'):
            hint = p._extra_kwds.get(key)
            if hint is not None:
                myobj[key] = jsonable(hint)
    obj_list.append(myobj)
def convert_text3d(p, T, extra_kwds):
    """Convert a 3-d Text object to a JSON-able dict and append it to the
    enclosing ``obj_list``."""
    position = [0, 0, 0] if T is None else T([0, 0, 0])
    entry = {
        "type": "text",
        "text": p.string,
        "pos": position,
        "color": "#" + p.get_texture().hex_rgb(),
        'fontface': str(extra_kwds.get('fontface', 'Arial')),
        'constant_size': bool(extra_kwds.get('constant_size', True)),
        'fontsize': int(extra_kwds.get('fontsize', 12)),
    }
    obj_list.append(entry)
def convert_line(p, T, extra_kwds):
    """Convert a Line object to a JSON-able dict and append it to the
    enclosing ``obj_list``, applying transformation ``T`` if given."""
    if T is None:
        pts = p.points
    else:
        pts = [T.transform_point(q) for q in p.points]
    entry = {
        "type": "line",
        "points": jsonable(pts),
        "thickness": jsonable(p.thickness),
        "color": "#" + p.get_texture().hex_rgb(),
        "arrow_head": bool(p.arrow_head),
    }
    obj_list.append(entry)
def convert_point(p, T, extra_kwds):
    """Convert a Point object to a JSON-able dict and append it to the
    enclosing ``obj_list``, applying transformation ``T`` if given."""
    location = p.loc if T is None else T(p.loc)
    entry = {
        "type": "point",
        "loc": location,
        "size": json_float(p.size),
        "color": "#" + p.get_texture().hex_rgb(),
    }
    obj_list.append(entry)
def convert_combination(p, T, extra_kwds):
    """Recursively convert every child of a Graphics3dGroup by dispatching
    each one through handler()."""
    for child in p.all:
        handler(child)(child, T, p._extra_kwds)
def convert_transform_group(p, T, extra_kwds):
    """Compose the group's transformation with ``T`` (if any) and convert
    each child with the combined transformation."""
    own = p.get_transformation()
    T = own if T is None else T * own
    for child in p.all:
        handler(child)(child, T, p._extra_kwds)
def nothing(p, T, extra_kwds):
    """No-op converter used for object types that produce no output
    (e.g. an empty Graphics3d scene)."""
    return None
def handler(p):
    """Return the conversion function appropriate for plot object ``p``.

    The isinstance checks are order-sensitive: TransformGroup must be
    tested before its base class Graphics3dGroup, and PrimitiveObject
    before the catch-all Graphics3d (which denotes an empty scene).
    """
    dispatch = [
        (sage.plot.plot3d.index_face_set.IndexFaceSet, convert_index_face_set),
        (sage.plot.plot3d.shapes.Text, convert_text3d),
        (sage.plot.plot3d.base.TransformGroup, convert_transform_group),
        (sage.plot.plot3d.base.Graphics3dGroup, convert_combination),
        (sage.plot.plot3d.shapes2.Line, convert_line),
        (sage.plot.plot3d.shapes2.Point, convert_point),
        (sage.plot.plot3d.base.PrimitiveObject, convert_index_face_set),
        # An instance of the Graphics3d base class itself is an empty scene.
        (sage.plot.plot3d.base.Graphics3d, nothing),
    ]
    for klass, converter in dispatch:
        if isinstance(p, klass):
            return converter
    raise NotImplementedError("unhandled type ", type(p))
# start it going -- this modifies obj_list
handler(p)(p, None, None)
# now obj_list is full of the objects
return obj_list
###
# Interactive 2d Graphics
###
import os, matplotlib.figure
class InteractiveGraphics(object):
    """Wrap a 2-d graphic so mouse events on the rendered image trigger
    Python callbacks.

    Args:
        g: a Sage ``Graphics`` object or a ``matplotlib.figure.Figure``.
        **events: maps event names to callables; each callable receives the
            event position converted to data coordinates.
    """
    def __init__(self, g, **events):
        self._g = g
        self._events = events

    def figure(self, **kwds):
        """Return a matplotlib Figure for the wrapped graphic, with an Agg
        canvas attached and a tight layout applied."""
        if isinstance(self._g, matplotlib.figure.Figure):
            return self._g
        options = dict()
        options.update(self._g.SHOW_OPTIONS)
        options.update(self._g._extra_kwds)
        options.update(kwds)
        # Drop show()-only options that Graphics.matplotlib() rejects.
        # NOTE(review): pop() without a default raises KeyError if a key is
        # missing -- presumably SHOW_OPTIONS always supplies all three;
        # confirm.
        options.pop('dpi')
        options.pop('transparent')
        options.pop('fig_tight')
        fig = self._g.matplotlib(**options)
        from matplotlib.backends.backend_agg import FigureCanvasAgg
        canvas = FigureCanvasAgg(fig)
        fig.set_canvas(canvas)
        fig.tight_layout()  # critical, since sage does this -- if not, coords all wrong
        return fig

    def save(self, filename, **kwds):
        """Render the wrapped graphic to ``filename``."""
        if isinstance(self._g, matplotlib.figure.Figure):
            self._g.savefig(filename)
        else:
            # When fig_tight=True (the default), the margins are very slightly different.
            # I don't know how to properly account for this yet (or even if it is possible),
            # since it only happens at figsize time -- do "a=plot(sin); a.save??".
            # So for interactive graphics, we just set this to false no matter what.
            kwds['fig_tight'] = False
            self._g.save(filename, **kwds)

    def show(self, **kwds):
        """Render the figure to a temporary file, publish it through salvus
        with the registered event hooks, then delete the file."""
        fig = self.figure(**kwds)
        ax = fig.axes[0]
        # upper left data coordinates
        xmin, ymax = ax.transData.inverted().transform(
            fig.transFigure.transform((0, 1)))
        # lower right data coordinates
        xmax, ymin = ax.transData.inverted().transform(
            fig.transFigure.transform((1, 0)))
        id = '_a' + uuid().replace('-', '')

        def to_data_coords(p):
            # Map a figure-relative point (0<=x,y<=1) to data coordinates;
            # the y axis is flipped (figure origin is top-left).
            return ((xmax - xmin) * p[0] + xmin,
                    (ymax - ymin) * (1 - p[1]) + ymin)

        if kwds.get('svg', False):
            filename = '%s.svg' % id
            del kwds['svg']
        else:
            filename = '%s.png' % id
        fig.savefig(filename)

        def f(event, p):
            # Callback registered in the salvus namespace; converts the
            # raw figure coordinates before invoking the user handler.
            self._events[event](to_data_coords(p))
        sage_salvus.salvus.namespace[id] = f
        x = {}
        for ev in list(self._events.keys()):
            x[ev] = id
        sage_salvus.salvus.file(filename, show=True, events=x)
        os.unlink(filename)

    def __del__(self):
        # NOTE(review): ``self._id`` is never assigned anywhere in this
        # class -- show() only binds a local ``id`` -- so this loop raises
        # AttributeError, which the interpreter suppresses during __del__.
        # The namespace entries registered in show() are therefore probably
        # never removed.  TODO: confirm and fix the cleanup.
        for ev in self._events:
            u = self._id + ev
            if u in sage_salvus.salvus.namespace:
                del sage_salvus.salvus.namespace[u]
###
# D3-based interactive 2d Graphics
###
###
# The following is a modified version of graph_plot_js.py from the Sage library, which was
# written by Nathann Cohen in 2013.
###
def graph_to_d3_jsonable(G,
                         vertex_labels=True,
                         edge_labels=False,
                         vertex_partition=[],
                         edge_partition=[],
                         force_spring_layout=False,
                         charge=-120,
                         link_distance=50,
                         link_strength=1,
                         gravity=.04,
                         vertex_size=7,
                         edge_thickness=2,
                         width=None,
                         height=None,
                         **ignored):
    r"""
    Display a graph in CoCalc using the D3 visualization library.
    INPUT:
    - ``G`` -- the graph
    - ``vertex_labels`` (boolean) -- Whether to display vertex labels (set to
      ``True`` by default).
    - ``edge_labels`` (boolean) -- Whether to display edge labels (set to
      ``False`` by default).
    - ``vertex_partition`` -- a list of lists representing a partition of the
      vertex set. Vertices are then colored in the graph according to the
      partition. Set to ``[]`` by default.
    - ``edge_partition`` -- same as ``vertex_partition``, with edges
      instead. Set to ``[]`` by default.
    - ``force_spring_layout`` -- whether to take sage's position into account if
      there is one (see :meth:`~sage.graphs.generic_graph.GenericGraph.get_pos` and
      :meth:`~sage.graphs.generic_graph.GenericGraph.set_pos`), or to compute a spring
      layout. Set to ``False`` by default.
    - ``vertex_size`` -- The size of a vertex' circle. Set to `7` by default.
    - ``edge_thickness`` -- Thickness of an edge. Set to ``2`` by default.
    - ``charge`` -- the vertices' charge. Defines how they repulse each
      other. See `<https://github.com/mbostock/d3/wiki/Force-Layout>`_ for more
      information. Set to ``-120`` by default.
    - ``link_distance`` -- See
      `<https://github.com/mbostock/d3/wiki/Force-Layout>`_ for more
      information. Set to ``50`` by default.
    - ``link_strength`` -- See
      `<https://github.com/mbostock/d3/wiki/Force-Layout>`_ for more
      information. Set to ``1`` by default.
    - ``gravity`` -- See
      `<https://github.com/mbostock/d3/wiki/Force-Layout>`_ for more
      information. Set to ``0.04`` by default.
    - ``width``, ``height`` -- optional display size; passed through as
      floats (``None`` allowed).
    - ``**ignored`` -- any further keyword arguments are silently discarded.
    EXAMPLES::
        show(graphs.RandomTree(50), d3=True)
        show(graphs.PetersenGraph(), d3=True, vertex_partition=g.coloring())
        show(graphs.DodecahedralGraph(), d3=True, force_spring_layout=True)
        show(graphs.DodecahedralGraph(), d3=True)
        g = digraphs.DeBruijn(2,2)
        g.allow_multiple_edges(True)
        g.add_edge("10","10","a")
        g.add_edge("10","10","b")
        g.add_edge("10","10","c")
        g.add_edge("10","10","d")
        g.add_edge("01","11","1")
        show(g, d3=True, vertex_labels=True,edge_labels=True,
             link_distance=200,gravity=.05,charge=-500,
             edge_partition=[[("11","12","2"),("21","21","a")]],
             edge_thickness=4)
    """
    directed = G.is_directed()
    multiple_edges = G.has_multiple_edges()
    # Associated an integer to each vertex
    v_to_id = {v: i for i, v in enumerate(G.vertices())}
    # Vertex colors: default group is len(vertex_partition), i.e. one past
    # the last partition class; vertices in a partition class get its index.
    color = {i: len(vertex_partition) for i in range(G.order())}
    for i, l in enumerate(vertex_partition):
        for v in l:
            color[v_to_id[v]] = i
    # Vertex list
    nodes = []
    for v in G.vertices():
        nodes.append({"name": str(v), "group": str(color[v_to_id[v]])})
    # Edge colors.
    edge_color_default = "#aaa"
    from sage.plot.colors import rainbow
    color_list = rainbow(len(edge_partition))
    edge_color = {}
    for i, l in enumerate(edge_partition):
        for e in l:
            u, v, label = e if len(e) == 3 else e + (None, )
            edge_color[u, v, label] = color_list[i]
            # Undirected graphs: register both orientations of the edge.
            if not directed:
                edge_color[v, u, label] = color_list[i]
    # Edge list
    edges = []
    seen = {}  # How many times has this edge been seen ?
    for u, v, l in G.edges():
        # Edge color (note: ``color`` is re-bound here, shadowing the
        # vertex-color dict above -- which is no longer needed).
        color = edge_color.get((u, v, l), edge_color_default)
        # Computes the curve of the edge
        curve = 0
        # Loop ?
        if u == v:
            seen[u, v] = seen.get((u, v), 0) + 1
            curve = seen[u, v] * 10 + 10
        # For directed graphs, one also has to take into accounts
        # edges in the opposite direction
        elif directed:
            if G.has_edge(v, u):
                seen[u, v] = seen.get((u, v), 0) + 1
                curve = seen[u, v] * 15
            else:
                if multiple_edges and len(G.edge_label(u, v)) != 1:
                    # Multiple edges. The first one has curve 15, then
                    # -15, then 30, then -30, ...
                    seen[u, v] = seen.get((u, v), 0) + 1
                    curve = (1 if seen[u, v] % 2 else -1) * (seen[u, v] //
                                                            2) * 15
        elif not directed and multiple_edges:
            # Same formula as above for multiple edges
            if len(G.edge_label(u, v)) != 1:
                seen[u, v] = seen.get((u, v), 0) + 1
                curve = (1 if seen[u, v] % 2 else -1) * (seen[u, v] // 2) * 15
        # Adding the edge to the list
        edges.append({
            "source": v_to_id[u],
            "target": v_to_id[v],
            "strength": 0,
            "color": color,
            "curve": curve,
            "name": str(l) if edge_labels else ""
        })
    # Self-loops are rendered separately from ordinary links.
    loops = [e for e in edges if e["source"] == e["target"]]
    edges = [e for e in edges if e["source"] != e["target"]]
    # Defines the vertices' layout if possible
    Gpos = G.get_pos()
    pos = []
    if Gpos is not None and force_spring_layout is False:
        # A fixed layout is available: disable the force simulation so the
        # stored positions are kept.
        charge = 0
        link_strength = 0
        gravity = 0
        for v in G.vertices():
            x, y = Gpos[v]
            # y is negated: screen coordinates grow downward.
            pos.append([json_float(x), json_float(-y)])
    # NOTE(review): link_strength is truncated with int() below, so a
    # fractional value like 1.5 becomes 1 -- confirm whether that is
    # intended or whether float() was meant.
    return {
        "nodes": nodes,
        "links": edges,
        "loops": loops,
        "pos": pos,
        "directed": G.is_directed(),
        "charge": int(charge),
        "link_distance": int(link_distance),
        "link_strength": int(link_strength),
        "gravity": float(gravity),
        "vertex_labels": bool(vertex_labels),
        "edge_labels": bool(edge_labels),
        "vertex_size": int(vertex_size),
        "edge_thickness": int(edge_thickness),
        "width": json_float(width),
        "height": json_float(height)
    }
|
agpl-3.0
|
florian-f/sklearn
|
examples/linear_model/plot_omp.py
|
4
|
1718
|
"""
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import pylab as pl
import numpy as np
from sklearn.linear_model import orthogonal_mp
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Dx
# |x|_0 = n_nonzero_coefs
y, D, x = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = x.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
pl.subplot(3, 1, 1)
pl.xlim(0, 512)
pl.title("Sparse signal")
pl.stem(idx, x[idx])
# plot the noise-free reconstruction
####################################
x_r = orthogonal_mp(D, y, n_nonzero_coefs)
idx_r, = x_r.nonzero()
pl.subplot(3, 1, 2)
pl.xlim(0, 512)
pl.title("Recovered signal from noise-free measurements")
pl.stem(idx_r, x_r[idx_r])
# plot the noisy reconstruction
###############################
x_r = orthogonal_mp(D, y_noisy, n_nonzero_coefs)
idx_r, = x_r.nonzero()
pl.subplot(3, 1, 3)
pl.xlim(0, 512)
pl.title("Recovered signal from noisy measurements")
pl.stem(idx_r, x_r[idx_r])
pl.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
pl.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
pl.show()
|
bsd-3-clause
|
ryanfobel/microdrop
|
microdrop/gui/experiment_log_controller.py
|
1
|
26474
|
"""
Copyright 2011 Ryan Fobel
This file is part of MicroDrop.
MicroDrop is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MicroDrop is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MicroDrop. If not, see <http://www.gnu.org/licenses/>.
"""
from collections import namedtuple
import logging
import os
import pkg_resources
import time
import pandas as pd
from flatland import Form
from microdrop_utility import copytree
from microdrop_utility.gui import (combobox_set_model_from_list,
combobox_get_active_text, textview_get_text)
from path_helpers import path
from pygtkhelpers.delegates import SlaveView
from pygtkhelpers.ui.extra_dialogs import yesno
from pygtkhelpers.ui.extra_widgets import Directory
from pygtkhelpers.ui.notebook import NotebookManagerView
import gtk
from ..experiment_log import ExperimentLog
from ..plugin_manager import (IPlugin, SingletonPlugin, implements,
PluginGlobals, emit_signal, ScheduleRequest,
get_service_names, get_service_instance_by_name)
from ..plugin_helpers import AppDataController
from ..protocol import Protocol
from ..app_context import get_app
from ..dmf_device import DmfDevice
from .dmf_device_controller import DEVICE_FILENAME
logger = logging.getLogger(__name__)
from .. import glade_path
class ExperimentLogColumn():
    """Descriptor for one column of the experiment-log step grid.

    Attributes are set in ``__init__``:
    ``name`` -- column header shown in the tree view;
    ``type`` -- Python type used for the corresponding gtk.ListStore column;
    ``format_string`` -- optional printf-style format for rendering values
    (``None`` means the default renderer is used).
    """
    def __init__(self, name, type, format_string=None):
        # Parameter name ``type`` intentionally mirrors the attribute name,
        # shadowing the builtin within this method only.
        self.name = name
        self.type = type
        self.format_string = format_string
class ExperimentLogContextMenu(SlaveView):
    """
    Slave view for context-menu for a row in the experiment log step grid view.
    """
    builder_path = glade_path().joinpath('experiment_log_context_menu.glade')

    def popup(self, event):
        """Show the context menu at the event position, but only if at
        least one of its items is currently visible."""
        for child in self.menu_popup.get_children():
            if child.get_visible():
                self.menu_popup.popup(None, None, None, event.button,
                                      event.time, None)
                break

    def add_item(self, menu_item):
        """Append ``menu_item`` to the context menu and make it visible."""
        self.menu_popup.append(menu_item)
        menu_item.show()
# Register the plugin classes below under the 'microdrop' plugin
# environment (paired with the PluginGlobals.pop_env() call at the end of
# this module).
PluginGlobals.push_env('microdrop')
class ExperimentLogController(SingletonPlugin, AppDataController):
    """Plugin that records, displays and exports MicroDrop experiment logs.

    Presents a window listing the steps of previously run experiments and
    provides actions to: reload the device/protocol used for a run, open
    the log directory, edit notes, export data to Excel, and manage
    background IPython notebook sessions.

    NOTE(review): this module is Python 2 code (``print`` statements,
    ``except Exception, why``, ``iteritems``).
    """
    implements(IPlugin)

    # Bundle of the three artifacts loaded for one experiment run.
    Results = namedtuple('Results', ['log', 'protocol', 'dmf_device'])
    builder_path = glade_path().joinpath('experiment_log_window.glade')

    @property
    def AppFields(self):
        """App-settings form contributed by this plugin (persisted config)."""
        return Form.of(
            Directory.named('notebook_directory').using(default='', optional=True),
        )

    def __init__(self):
        self.name = "microdrop.gui.experiment_log_controller"
        self.builder = gtk.Builder()
        self.builder.add_from_file(self.builder_path)
        self.window = self.builder.get_object("window")
        self.combobox_log_files = self.builder.get_object("combobox_log_files")
        self.results = self.Results(None, None, None)
        self.protocol_view = self.builder.get_object("treeview_protocol")
        self.protocol_view.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
        # Columns shown in the step grid; format strings control rendering.
        self.columns = [ExperimentLogColumn("Time (s)", float, "%.3f"),
                        ExperimentLogColumn("Step #", int),
                        ExperimentLogColumn("Duration (s)", float, "%.3f"),
                        ExperimentLogColumn("Voltage (VRMS)", int),
                        ExperimentLogColumn("Frequency (kHz)", float, "%.1f")]
        (self.protocol_view.get_selection()
         .connect("changed", self.on_treeview_selection_changed))
        self.popup = ExperimentLogContextMenu()
        self.notebook_manager_view = None
        self.previous_notebook_dir = None

    ###########################################################################
    # Callback methods
    def on_app_exit(self):
        """Save the current log and stop any background IPython notebooks."""
        self.save()
        logger.info('[ExperimentLogController] Killing IPython notebooks')
        if self.notebook_manager_view is not None:
            self.notebook_manager_view.stop()

    def on_button_load_device_clicked(self, widget, data=None):
        """Load the device file saved with the selected experiment."""
        app = get_app()
        filename = path(os.path.join(app.experiment_log.directory,
                                     str(self.results.log.experiment_id),
                                     DEVICE_FILENAME))
        try:
            app.dmf_device_controller.load_device(filename)
        # NOTE(review): bare ``except:`` also swallows KeyboardInterrupt /
        # SystemExit -- should be ``except Exception:``.
        except:
            logger.error("Could not open %s" % filename)

    def on_button_load_protocol_clicked(self, widget, data=None):
        """Load the protocol file saved with the selected experiment."""
        app = get_app()
        filename = path(os.path.join(app.experiment_log.directory,
                                     str(self.results.log.experiment_id),
                                     'protocol'))
        app.protocol_controller.load_protocol(filename)

    def on_button_open_clicked(self, widget, data=None):
        '''
        Open selected experiment log directory in system file browser.
        '''
        # NOTE(review): ``app`` is unused here.
        app = get_app()
        self.results.log.get_log_path().launch()

    def on_button_notes_clicked(self, widget, data=None):
        '''
        Open selected experiment log notes using the default text editor.
        '''
        # NOTE(review): ``app`` is unused here.
        app = get_app()
        notes_path = self.results.log.get_log_path() / 'notes.txt'
        notes_path.launch()

    def on_combobox_log_files_changed(self, widget, data=None):
        """Refresh the view when a different experiment run is selected."""
        if self.notebook_manager_view is not None:
            # Update active notebook directory for notebook_manager_view.
            log_root = self.get_selected_log_root()
            self.notebook_manager_view.notebook_dir = log_root
        self.update()

    def on_dmf_device_swapped(self, old_dmf_device, dmf_device):
        """Point the experiment log at the new device's log directory."""
        app = get_app()
        experiment_log = None
        if dmf_device and dmf_device.name:
            device_path = os.path.join(app.get_device_directory(),
                                       dmf_device.name, "logs")
            experiment_log = ExperimentLog(device_path)
        emit_signal("on_experiment_log_changed", experiment_log)

    def on_experiment_log_changed(self, experiment_log):
        """Repopulate the run selector from the log directory contents."""
        log_files = []
        if experiment_log and path(experiment_log.directory).isdir():
            for d in path(experiment_log.directory).dirs():
                f = d / path("data")
                if f.isfile():
                    try:
                        # cast log directory names as integers so that
                        # they sort numerically, not as strings
                        log_files.append(int(d.name))
                    except ValueError:
                        log_files.append(d.name)
            log_files.sort()
        self.combobox_log_files.clear()
        combobox_set_model_from_list(self.combobox_log_files, log_files)
        # changing the combobox log files will force an update
        if len(log_files):
            self.combobox_log_files.set_active(len(log_files) - 1)
        if self.notebook_manager_view is not None:
            # Update active notebook directory for notebook_manager_view.
            log_root = self.get_selected_log_root()
            self.notebook_manager_view.notebook_dir = log_root

    def on_new_experiment(self, widget=None, data=None):
        """Menu handler: close out the current log and start a fresh one."""
        logger.info('New experiment clicked')
        self.save()

    def on_plugin_enable(self):
        """Wire up the GUI and the IPython notebook manager on enable."""
        super(ExperimentLogController, self).on_plugin_enable()
        app = get_app()
        app.experiment_log_controller = self
        self.menu_new_experiment = app.builder.get_object('menu_new_experiment')
        app.signals["on_menu_new_experiment_activate"] = self.on_new_experiment
        self.menu_new_experiment.set_sensitive(False)
        self.window.set_title("Experiment logs")
        self.builder.connect_signals(self)
        app_values = self.get_app_values()
        # Create buttons to manage background IPython notebook sessions.
        # Sessions are killed when microdrop exits.
        self.notebook_manager_view = NotebookManagerView()
        self.apply_notebook_dir(app_values['notebook_directory'])
        vbox = self.builder.get_object('vbox1')
        hbox = gtk.HBox()
        label = gtk.Label('IPython notebook:')
        hbox.pack_start(label, False, False)
        hbox.pack_end(self.notebook_manager_view.widget, False, False)
        vbox.pack_start(hbox, False, False)
        vbox.reorder_child(hbox, 1)
        hbox.show_all()

    def on_button_export_data_clicked(self, widget, data=None):
        """Export per-plugin experiment data to an .xlsx workbook and open it."""
        export_path = path(os.path.join(self.results.log.directory,
                                        str(self.results.log.experiment_id),
                                        'data.xlsx'))
        if export_path.exists():
            result = yesno('Export file already exists. Would like '
                           'like to overwrite it? Selecting "No" will '
                           'open the existing file.', default=gtk.RESPONSE_NO)
        if not export_path.exists() or result == gtk.RESPONSE_YES:
            export_data = emit_signal('on_export_experiment_log_data',
                                      self.results.log)
            if export_data:
                writer = pd.ExcelWriter(export_path, engine='openpyxl')
                for i, (plugin_name, plugin_data) in enumerate(export_data.iteritems()):
                    for name, df in plugin_data.iteritems():
                        # Excel sheet names have a 31 character max
                        sheet_name = ('%03d-%s' % (i, name))[:31]
                        df.to_excel(writer, sheet_name)
                try:
                    writer.save()
                except IOError:
                    logger.warning("Error writing to file (maybe it is already open?).")
            else:
                logger.warning("No data to export.")
        # launch the file in excel
        if export_path.exists():
            export_path.startfile()

    def on_protocol_pause(self):
        """After the final repetition of the final step, offer to start a
        new experiment (which archives the current log)."""
        app = get_app()
        # if we're on the last step of the last repetition, start a new
        # experiment log
        if ((app.protocol.current_step_number == len(app.protocol) - 1) and
                (app.protocol.current_repetition == app.protocol.n_repeats - 1)):
            result = yesno('Experiment complete. Would you like to start a new experiment?')
            if result == gtk.RESPONSE_YES:
                self.save()

    def on_step_run(self):
        """Signal step completion immediately and enable 'New experiment'
        once the log has at least one recorded step."""
        app = get_app()
        if app.running or app.realtime_mode:
            emit_signal('on_step_complete', [self.name, None])
            self.menu_new_experiment.set_sensitive(True)

    def on_treeview_protocol_button_press_event(self, widget, event):
        """Show the context menu on right-click (button 3)."""
        if event.button == 3:
            self.popup.popup(event)
            return True

    def on_treeview_selection_changed(self, widget, data=None):
        emit_signal("on_experiment_log_selection_changed",
                    [self.get_selected_data()])

    def on_window_delete_event(self, widget, data=None):
        # Hide instead of destroying so the window can be reopened.
        self.window.hide()
        return True

    def on_window_show(self, widget, data=None):
        self.window.show()

    ###########################################################################
    # Mutator methods
    def apply_notebook_dir(self, notebook_directory):
        '''
        Set the notebook directory to the specified directory.
        If the specified directory is empty or `None`, use the default
        directory (i.e., in the default MicroDrop user directory) as the new
        directory path.
        If no directory was previously set and the specified directory does not
        exist, copy the default set of notebooks from the `microdrop` package
        to the new notebook directory.
        If a directory was previously set, copy the contents of the previous
        directory to the new directory (prompting the user to overwrite if the
        new directory already exists).
        '''
        app = get_app()
        # Debug output (Python 2 print statement).
        print '[{notebook_directory = "%s"}]' % notebook_directory
        if not notebook_directory:
            # The notebook directory is not set (i.e., empty or `None`), so set
            # a default.
            data_directory = path(app.config.data['data_dir'])
            notebook_directory = data_directory.joinpath('notebooks')
            print '[{new notebook_directory = "%s"}]' % notebook_directory
            app_values = self.get_app_values().copy()
            app_values['notebook_directory'] = notebook_directory
            self.set_app_values(app_values)
        if self.previous_notebook_dir and (notebook_directory ==
                                           self.previous_notebook_dir):
            # If the data directory hasn't changed, we do nothing
            return False
        notebook_directory = path(notebook_directory)
        if self.previous_notebook_dir:
            notebook_directory.makedirs_p()
            if notebook_directory.listdir():
                result = yesno('Merge?',
                               'Target directory [%s] is not empty. Merge '
                               'contents with current notebooks [%s] '
                               '(overwriting common paths in the target '
                               'directory)?' % (notebook_directory,
                                                self.previous_notebook_dir))
                if not result == gtk.RESPONSE_YES:
                    return False
            original_directory = path(self.previous_notebook_dir)
            for d in original_directory.dirs():
                copytree(d, notebook_directory.joinpath(d.name))
            for f in original_directory.files():
                f.copyfile(notebook_directory.joinpath(f.name))
            original_directory.rmtree()
        elif not notebook_directory.isdir():
            # if the notebook directory doesn't exist, copy the skeleton dir
            if notebook_directory.parent:
                notebook_directory.parent.makedirs_p()
            skeleton_dir = path(pkg_resources.resource_filename('microdrop',
                                                                'static'))
            skeleton_dir.joinpath('notebooks').copytree(notebook_directory)
        self.previous_notebook_dir = notebook_directory
        # Set the default template directory of the IPython notebook manager
        # widget to the notebooks directory.
        self.notebook_manager_view.template_dir = notebook_directory

    def save(self):
        """Finalize and persist the current experiment log, then open a
        fresh one.

        Saves the log data, a copy of the protocol, and the device SVG into
        the log directory, and records software/plugin version metadata.
        """
        app = get_app()
        # Only save the current log if it is not empty (i.e., it contains at
        # least one step).
        if (hasattr(app, 'experiment_log') and app.experiment_log and
                [x for x in app.experiment_log.get('step') if x is not None]):
            data = {'software version': app.version}
            data['device name'] = app.dmf_device.name
            data['protocol name'] = app.protocol.name
            plugin_versions = {}
            for name in get_service_names(env='microdrop.managed'):
                service = get_service_instance_by_name(name)
                if service._enable:
                    plugin_versions[name] = str(service.version)
            data['plugins'] = plugin_versions
            app.experiment_log.add_data(data)
            log_path = app.experiment_log.save()
            # Save the protocol to experiment log directory.
            app.protocol.save(os.path.join(log_path, 'protocol'))
            # Convert device to SVG string.
            svg_unicode = app.dmf_device.to_svg()
            # Save the device to experiment log directory.
            with open(os.path.join(log_path, DEVICE_FILENAME), 'wb') as output:
                output.write(svg_unicode)
            # create a new log
            experiment_log = ExperimentLog(app.experiment_log.directory)
            emit_signal('on_experiment_log_changed', experiment_log)
            # disable new experiment menu until a step has been run (i.e., until
            # we have some data in the log)
            self.menu_new_experiment.set_sensitive(False)

    def update(self):
        """Reload the selected run's log/protocol/device and refresh every
        label and the step grid; disable the action buttons on failure."""
        app = get_app()
        if not app.experiment_log:
            self._disable_gui_elements()
            return
        try:
            log_root = self.get_selected_log_root()
            log = log_root.joinpath("data")
            protocol = log_root.joinpath("protocol")
            dmf_device = log_root.joinpath(DEVICE_FILENAME)
            self.results = self.Results(ExperimentLog.load(log),
                                        Protocol.load(protocol),
                                        DmfDevice.load(dmf_device,
                                                       name=dmf_device.parent
                                                       .name))
            self._enable_gui_elements()
            # Migrate legacy in-log notes out to a notes.txt file.
            notes_path = self.results.log.get_log_path() / 'notes.txt'
            if not notes_path.isfile():
                data = self.results.log.get("notes")
                for i, val in enumerate(data):
                    if val is not None:
                        logger.info('Write out experiment notes to notes.txt')
                        notes_path.write_text(val)
                        # delete the notes from the experiment log
                        del self.results.log.data[i]['core']['notes']
                        self.results.log.save()
                        continue
            label = "UUID: %s" % self.results.log.uuid
            self.builder.get_object("label_uuid"). \
                set_text(label)
            label = "Software version: "
            data = self.results.log.get("software version")
            for val in data:
                if val:
                    label += val
            self.builder.get_object("label_software_version"). \
                set_text(label)
            label = "Device: "
            data = self.results.log.get("device name")
            for val in data:
                if val:
                    label += val
            self.builder.get_object("label_device"). \
                set_text(label)
            data = self.results.log.get("protocol name")
            label = "Protocol: None"
            for val in data:
                if val:
                    label = "Protocol: %s" % val
            self.builder.get_object("label_protocol"). \
                set_text(label)
            # Assemble the control-board description from several metadata
            # fields (any of which may be absent).
            label = "Control board: "
            data = self.results.log.get("control board name")
            for val in data:
                if val:
                    label += val
            data = self.results.log.get("control board hardware version")
            for val in data:
                if val:
                    label += " v%s" % val
            # NOTE(review): ``id`` shadows the builtin in this scope.
            id = ""
            data = self.results.log.get("control board serial number")
            for val in data:
                if val:
                    id = ", S/N %03d" % val
            data = self.results.log.get("control board id")
            for val in data:
                if val:
                    id += ", id: %s" % val
            data = self.results.log.get("control board uuid")
            for val in data:
                if val:
                    id += ", uuid: %s..." % str(val)[:8]
            data = self.results.log.get("control board software version")
            for val in data:
                if val:
                    label += "\n\t(Firmware: %s%s)" % (val, id)
            data = self.results.log.get("i2c devices")
            for val in data:
                if val:
                    label += "\ni2c devices:"
                    for address, description in sorted(val.items()):
                        label += "\n\t%d: %s" % (address, description)
            self.builder.get_object("label_control_board"). \
                set_text(label)
            label = "Enabled plugins: "
            data = self.results.log.get("plugins")
            for val in data:
                if val:
                    for k, v in val.iteritems():
                        label += "\n\t%s %s" % (k, v)
            self.builder.get_object("label_plugins"). \
                set_text(label)
            label = "Time of experiment: "
            data = self.results.log.get("start time")
            for val in data:
                if val:
                    label += time.ctime(val)
            self.builder.get_object("label_experiment_time"). \
                set_text(label)
            # Rebuild the step grid columns and rows from the log data.
            self._clear_list_columns()
            types = []
            for i, c in enumerate(self.columns):
                types.append(c.type)
                self._add_list_column(c.name, i, c.format_string)
            protocol_list = gtk.ListStore(*types)
            self.protocol_view.set_model(protocol_list)
            for d in self.results.log.data:
                if 'step' in d['core'].keys() and 'time' in d['core'].keys():
                    # Only show steps that exist in the protocol (See:
                    # http://microfluidics.utoronto.ca/microdrop/ticket/153)
                    #
                    # This prevents "list index out of range" errors, if a step
                    # that was saved to the experiment log is deleted, but it is
                    # still possible to have stale data if the protocol is
                    # edited in real-time mode.
                    if d['core']['step'] < len(self.results.protocol):
                        step = self.results.protocol[d['core']['step']]
                        dmf_plugin_name = step.plugin_name_lookup(
                            r'wheelerlab.dmf_control_board', re_pattern=True)
                        options = step.get_data(dmf_plugin_name)
                        vals = []
                        if not options:
                            continue
                        for i, c in enumerate(self.columns):
                            if c.name == "Time (s)":
                                vals.append(d['core']['time'])
                            elif c.name == "Step #":
                                vals.append(d['core']['step'] + 1)
                            elif c.name == "Duration (s)":
                                vals.append(options.duration / 1000.0)
                            elif c.name == "Voltage (VRMS)":
                                vals.append(options.voltage)
                            elif c.name == "Frequency (kHz)":
                                vals.append(options.frequency / 1000.0)
                            else:
                                vals.append(None)
                        protocol_list.append(vals)
        except Exception, why:
            logger.info("[ExperimentLogController].update(): %s" % why)
            self._disable_gui_elements()

    ###########################################################################
    # Accessor methods
    def get_schedule_requests(self, function_name):
        """
        Returns a list of scheduling requests (i.e., ScheduleRequest
        instances) for the function specified by function_name.
        """
        if function_name == 'on_experiment_log_changed':
            # ensure that the app's reference to the new experiment log gets set
            return [ScheduleRequest('microdrop.app', self.name)]
        elif function_name == 'on_plugin_enable':
            # We use the notebook directory path stored in the configuration in
            # the `on_plugin_enable` method. Therefore, we need to schedule
            # the `config_controller` plugin to handle the `on_plugin_enable`
            # first, so the configuration will be loaded before reading the
            # notebook directory.
            return [ScheduleRequest('microdrop.gui.config_controller',
                                    self.name)]
        return []

    def get_selected_log_root(self):
        """Return the directory of the experiment run currently selected in
        the combo box."""
        app = get_app()
        id = combobox_get_active_text(self.combobox_log_files)
        return path(app.experiment_log.directory) / path(id)

    def get_selected_data(self):
        """Return the log entries matching the rows selected in the step
        grid (matched by their 'time' value)."""
        selection = self.protocol_view.get_selection().get_selected_rows()
        selected_data = []
        for row in selection[1]:
            for d in self.results.log.data:
                if 'time' in d['core'].keys():
                    if d['core']['time'] == selection[0][row][0]:
                        selected_data.append(d)
        return selected_data

    ###########################################################################
    # Private methods
    def _add_list_column(self, title, columnId, format_string=None):
        """
        This function adds a column to the list view.
        First it create the gtk.TreeViewColumn and then set
        some needed properties
        """
        cell = gtk.CellRendererText()
        column = gtk.TreeViewColumn(title, cell, text=columnId)
        column.set_resizable(True)
        column.set_sort_column_id(columnId)
        if format_string:
            column.set_cell_data_func(cell,
                                      self._cell_renderer_format,
                                      format_string)
        self.protocol_view.append_column(column)

    def _cell_renderer_format(self, column, cell, model, iter, format_string):
        """Render a cell value through the column's printf-style format."""
        val = model.get_value(iter, column.get_sort_column_id())
        cell.set_property('text', format_string % val)

    def _clear_list_columns(self):
        """Remove all columns from the step grid."""
        while len(self.protocol_view.get_columns()):
            self.protocol_view.remove_column(self.protocol_view.get_column(0))

    def _disable_gui_elements(self):
        """Grey out the per-experiment action buttons."""
        for element_i in ['button_load_device', 'button_load_protocol',
                          'button_open', 'button_notes', 'button_export_data']:
            self.builder.get_object(element_i).set_sensitive(False)

    def _enable_gui_elements(self):
        """Re-enable the per-experiment action buttons."""
        for element_i in ['button_load_device', 'button_load_protocol',
                          'button_open', 'button_notes', 'button_export_data']:
            self.builder.get_object(element_i).set_sensitive(True)
# Tear down the plugin environment for this module; presumably balances a
# PluginGlobals.push_env(...) earlier in the file -- TODO confirm.
PluginGlobals.pop_env()
|
gpl-3.0
|
daStrauss/subsurface
|
src/slvr/phaseSplit.py
|
1
|
9164
|
'''
Created on Nov 11, 2012
Copyright © 2013
The Board of Trustees of The Leland Stanford Junior University.
All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: dstrauss
implementation of contrast source ADMM optimization
with phase split modification.
This is a new experiment to see if I can bring any of the power of the phase split method to
light here?
So we know that X and u ought to have the same phase, but in the end, they rarely do. (odd? dontcha think?)
So lets add in the phase holding term. Cuz they ought to have the same phase.
'''
import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as lin
from mpi4py import MPI
import sparseTools as spTools
import scipy.io as spio
from optimize import optimizer
from superSolve import wrapCvxopt
import time
class problem(optimizer):
    ''' Contrast-source ADMM solver ("contrast-X ADMM") with a phase-split
    modification: at each iteration the phase of the total field is frozen
    into a diagonal operator ``plp`` so that the contrast variable X and the
    field u are encouraged to share the same phase.
    '''
    def initOpt(self, uHat, D):
        # Algorithm hyper-parameters pulled from the options dict D.
        self.rho = D['rho']
        self.xi = D['xi']
        self.uHat = uHat
        self.upperBound = D['uBound']
        self.lmb = D['lmb']
        self.obj = np.zeros(D['maxIter'])
        # add some local vars for ease
        self.s = self.fwd.getS() # 1j*self.muo*self.w
        self.A = self.fwd.nabla2+self.fwd.getk(0)
        # Per-iteration diagnostics: primal gap, objective, parameter history.
        self.gap = list()
        self.objInt = list()
        self.pL = list()
        # oh. this is going to get strange.
        # integrate TE concepts first.
        # contrast variable
        # self.X = np.zeros(self.fwd.nRx*self.fwd.nRy,dtype='complex128')
        # dual variable
        # self.Z = np.zeros(self.fwd.nRx*self.fwd.nRy,dtype='complex128')
        # scattered fields
        self.us = np.zeros(self.fwd.N,dtype='complex128')
        # just to make life easier:
        self.ub = self.fwd.sol[0] # shouldn't need --> .flatten()
        self.scaleC = 1.0 # 1.0/np.linalg.norm(self.fwd.Ms*self.ub)
        # create some new operators for doing what is necessary for the
        # contrast X work
        # pp holds the frozen phase; X/Z are the contrast and dual variables,
        # tD/tL the consensus dual and local copy of the material parameters.
        self.pp = np.zeros(self.fwd.getXSize(),dtype='complex128')
        self.X = np.zeros(self.fwd.getXSize(),dtype='complex128')
        self.Z = np.zeros(self.fwd.getXSize(),dtype='complex128')
        self.tD = np.zeros(self.fwd.nRx*self.fwd.nRy,dtype='complex128')
        self.tL = np.zeros(self.fwd.nRx*self.fwd.nRy,dtype='complex128')
        self.fwd.setCTRX()
        ''' subtract out the background field '''
        self.uHat = self.uHat - self.fwd.Ms*self.ub
        ''' in this instance, I don't care about the results, i.e., I don't care about the actual solutions'''
    def internalHard(self, thk):
        '''creates the matrix every time, a hard alternative, to internalSymbolic
        so that I can do A/B testing easily w.r.t the old standard

        Builds and solves the full KKT system for the joint (u, x) update
        given the current material estimate ``thk``; returns (us, x).
        '''
        nX = self.fwd.getXSize()
        pm = sparse.spdiags(self.s*self.fwd.p2x*thk, 0, nX, nX)
        # plp: diagonal unit-modulus matrix carrying the frozen phase pp.
        plp = sparse.spdiags(np.exp(1j*self.pp),0,nX,nX)
        # print pm.shape
        # print self.fwd.x2u.shape
        ''' ds changes at ever iteration '''
        ds = pm*self.fwd.x2u.T # The sampling and material scaling.
        # Construct the KKT Matrix
        ''' changes '''
        bmuu = self.scaleC*(self.fwd.Ms.T*self.fwd.Ms) + self.rho*(ds.T.conj()*ds)
        bmux = -self.rho*(ds.T.conj()*plp)
        bmxu = -self.rho*(plp.T.conj()*ds)
        ''' static '''
        bmul = self.A.T.conj()
        bmxx = self.rho*sparse.eye(nX, nX)
        bmxl = plp.T.conj()*self.fwd.x2u.T
        bmlu = self.A
        bmlx = self.fwd.x2u*plp
        bmll = sparse.coo_matrix((self.fwd.N, self.fwd.N))
        ''' right hand side '''
        rhsu = self.fwd.Ms.T.conj()*self.uHat - self.rho*(ds.T.conj()*ds)*self.ub + self.rho*ds.T.conj()*self.Z
        rhsx = self.rho*plp.T.conj()*(ds*self.ub - self.Z) # chng
        rhsl = np.zeros(self.fwd.N)
        # Assemble the 3x3 block system and solve it in one shot.
        bm = spTools.vCat([spTools.hCat([bmuu, bmux, bmul]), \
                           spTools.hCat([bmxu, bmxx, bmxl]), \
                           spTools.hCat([bmlu, bmlx, bmll])])
        rhsbm = np.concatenate((rhsu, rhsx, rhsl))
        updt = lin.spsolve(bm.tocsr(), rhsbm)
        # N = self.nx*self.ny
        us = updt[:self.fwd.N]
        x = updt[self.fwd.N:(self.fwd.N+nX)]
        return us,x
    def runOpt(self,P):
        ''' to run at each layer at each iteration '''
        ''' the global tT update happens effectively here -- in parallel with u,x update'''
        ''' jointly update u,x '''
        # pfake = (self.upperBound/2.0)*np.ones(self.fwd.getXSize(),dtype='complex128')
        # Refreeze the phase from the current total field before the solve.
        self.pp = np.angle(self.fwd.x2u.T*(self.us + self.ub))
        nX = self.fwd.getXSize()
        plp = sparse.spdiags(np.exp(1j*self.pp),0,nX,nX)
        self.us,self.X = self.internalHard(self.tL)
        ''' do some accounting '''
        self.gap.append(np.linalg.norm(plp*self.X - self.tL*(self.s*self.fwd.Md*(self.us+self.ub))))
        ''' update tL '''
        self.tL = self.updateThetaLocal(P)
        self.pL.append(self.tL)
        '''update dual variables last '''
        self.Z = self.Z + (plp*self.X - (self.s*self.fwd.x2u.T*(self.ub + self.us))*(self.fwd.p2x*self.tL))
        self.tD = self.tD + (self.tL-P)
        # Data-misfit objective at the sensors for this iteration.
        obj = np.linalg.norm(self.uHat-self.fwd.Ms*self.us)
        self.objInt.append(obj)
        return obj
    def updateThetaLocal(self,P):
        '''routine that solves for a local copy of the parameters theta '''
        uL = self.s*self.fwd.Md*(self.us + self.ub)
        nX = self.fwd.getXSize()
        plp = sparse.spdiags(np.exp(1j*self.pp),0,nX,nX)
        bL = plp*self.X + self.Z
        localT = (P - self.tD)
        # Closed-form ridge-style update, then project onto the box
        # [0, upperBound] after taking the real part.
        theta = (self.rho*(uL.conj()*bL) + self.xi*(localT))/(self.rho*(uL.conj()*uL) + self.xi + self.lmb)
        theta = theta.real
        theta = np.maximum(theta,0)
        theta = np.minimum(theta,self.upperBound)
        return theta
    def writeOut(self, rank, ix=0):
        """Save fields, contrast and diagnostics to a MATLAB .mat file."""
        import os
        assert os.path.exists(self.outDir + 'Data')
        us = self.fwd.parseFields(self.us)
        ub = self.fwd.parseFields(self.fwd.sol[0])
        sgmm = self.fwd.parseFields(self.fwd.sigmap[0])
        uTrue = self.fwd.parseFields(self.fwd.sol[1])
        # NOTE(review): the key 'obj' appears twice in this dict literal;
        # Python keeps the last one, so self.obj is never saved and
        # 'obj' maps to self.objInt -- confirm which was intended.
        D = {'f':self.fwd.f, 'angle':self.fwd.incAng, 'sigMat':sgmm[0], 'ub':ub[0], \
             'us':us[0], 'uTrue':uTrue[0], \
             'X':self.X, 'obj':self.obj, 'flavor':self.fwd.flavor, 'gap':self.gap, \
             'obj':self.objInt, 'Ms':self.fwd.Ms, 'phist':self.pL}
        spio.savemat(self.outDir + 'Data/contrastX' + repr(rank) + '_' + repr(ix), D)
    def aggregatorSemiParallel(self,S, comm):
        ''' Do the aggregation step in parallel whoop!
        Revised to be just a simple aggregation/mean step '''
        n = self.fwd.nRx*self.fwd.nRy
        tt = self.tL + self.tD
        T = np.zeros(n,dtype='complex128')
        # Average (tL + tD) across all MPI ranks.
        T = comm.allreduce(tt,T,op=MPI.SUM)
        T = T/comm.Get_size()
        return T
    def plotSemiParallel(self,P,resid,rank,ix=0):
        ''' Plotting routine if things are semiParallel'''
        import matplotlib.pyplot as plt
        plt.close('all')
        import os
        plt.close('all')
        if not os.path.exists(self.outDir + 'Figs'):
            os.mkdir(self.outDir + 'Figs')
        # vv = S.Ms*S.v
        # Sensor-domain views of the scattered and background fields.
        uu = self.fwd.Ms*self.us
        ub = self.fwd.Ms*self.ub
        skt = self.uHat-ub
        plt.figure(100 + rank + 10*ix)
        plt.plot(np.arange(self.fwd.nSen), skt.real, np.arange(self.fwd.nSen), uu.real)
        plt.savefig(self.outDir + 'Figs/fig' + repr(100+rank+10*ix))
        # Only rank 0 / first frequency writes the summary figures.
        if (rank==0) & (ix==0):
            # then print some figures
            plt.figure(383)
            plt.plot(resid)
            plt.savefig(self.outDir + 'Figs/fig383')
            plt.figure(387)
            plt.imshow(P.reshape(self.fwd.nRx,self.fwd.nRy), interpolation='nearest')
            plt.colorbar()
            plt.savefig(self.outDir + 'Figs/fig387')
            us = self.fwd.parseFields(self.us)
            plt.figure(76)
            plt.subplot(121)
            plt.imshow(us[0].real)
            plt.colorbar()
            plt.subplot(122)
            plt.imshow(us[0].imag)
            plt.colorbar()
            plt.savefig(self.outDir + 'Figs/fig76')
        # all show!
        # plt.show()
|
apache-2.0
|
jlegendary/scikit-learn
|
sklearn/cluster/__init__.py
|
364
|
1228
|
"""
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
# Public names exported by `from sklearn.cluster import *`; keep in sync with
# the imports above.
__all__ = ['AffinityPropagation',
           'AgglomerativeClustering',
           'Birch',
           'DBSCAN',
           'KMeans',
           'FeatureAgglomeration',
           'MeanShift',
           'MiniBatchKMeans',
           'SpectralClustering',
           'affinity_propagation',
           'dbscan',
           'estimate_bandwidth',
           'get_bin_seeds',
           'k_means',
           'linkage_tree',
           'mean_shift',
           'spectral_clustering',
           'ward_tree',
           'SpectralBiclustering',
           'SpectralCoclustering']
|
bsd-3-clause
|
jselsing/GRB111117A
|
py/line_flux_nii.py
|
1
|
3380
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from astropy.io import fits
import pandas as pd
import matplotlib.pyplot as pl
import seaborn as sns; sns.set_style('ticks')
import numpy as np
from scipy import stats, interpolate
import matplotlib as mpl
from astropy.convolution import Gaussian1DKernel, Gaussian2DKernel, convolve
from astropy.cosmology import Planck15 as cosmo
from astropy import units as u
from astropy import constants as c
# Matplotlib rcParams-style settings for publication figures.
# NOTE(review): this dict is defined but never applied (no
# `mpl.rcParams.update(params)` in this script) -- confirm whether it should be.
params = {
   'axes.labelsize': 10,
   'font.size': 10,
   'legend.fontsize': 10,
   'xtick.labelsize': 10,
   'ytick.labelsize': 10,
   'text.usetex': False,
   'figure.figsize': [7.281, 4.5]
   }
def convert_air_to_vacuum(air_wave):
    """Convert air wavelength(s) in Angstrom to vacuum wavelength(s).

    Based on http://idlastro.gsfc.nasa.gov/ftp/pro/astro/airtovac.pro, taken
    from https://github.com/desihub/specex/blob/master/python/specex_air_to_vacuum.py.
    Works on scalars and numpy arrays alike.

    Comparison with http://www.sdss.org/dr7/products/spectra/vacwavelength.html
    where: AIR = VAC / (1.0 + 2.735182E-4 + 131.4182 / VAC^2 + 2.76249E8 / VAC^4)
    air_wave=numpy.array([4861.363,4958.911,5006.843,6548.05,6562.801,6583.45,6716.44,6730.82])
    expected_vacuum_wave=numpy.array([4862.721,4960.295,5008.239,6549.86,6564.614,6585.27,6718.29,6732.68])
    """
    # Squared wavenumber in inverse microns.
    wavenumber_sq = (1e4 / air_wave) ** 2
    # Two-term dispersion fit for the refractive-index correction factor.
    refraction = (1.
                  + 5.792105e-2 / (238.0185 - wavenumber_sq)
                  + 1.67917e-3 / (57.362 - wavenumber_sq))
    return air_wave * refraction
def main():
    """Measure the [NII] line flux in the GRB 111117A NIR spectrum.

    Reads the extracted 1D spectrum and a synthetic sky model from ../data,
    masks sky-contaminated pixels, integrates the flux in a +/-20 A window
    around the (vacuum-corrected) [NII] 21144 A line, and saves a plot.
    """
    # Get extraction
    data = np.genfromtxt("../data/NIRext.dat", dtype=None)
    # data = np.genfromtxt("../data/NIROB4skysubstdext.dat", dtype=None)
    # print(np.std([18, 31, 38, 20]))
    # exit()
    wl = data[:, 1]
    flux = data[:, 2]
    error = data[:, 3]
    # Clip outlier error values down to the median error.
    error[error > 1e-15] = np.median(error)
    # Load synthetic sky
    sky_model = fits.open("../data/NIRskytable.fits")
    wl_sky = 10*(sky_model[1].data.field('lam')) # In nm
    # Convolve to observed grid
    f = interpolate.interp1d(wl_sky, convolve(sky_model[1].data.field('flux'), Gaussian1DKernel(stddev=3)), bounds_error=False, fill_value=np.nan)
    # f = interpolate.interp1d(wl_sky, sky_model[1].data.field('flux'), bounds_error=False, fill_value=np.nan)
    flux_sky = f(wl)
    # Hand-picked wavelength windows contaminated by sky residuals.
    flux[(wl > 21055) & (wl < 21068)] = np.nan
    flux[(wl > 21098) & (wl < 21102)] = np.nan
    flux[(wl > 21109) & (wl < 21113)] = np.nan
    # Interpolate over the masked / high-sky pixels.
    m = (flux_sky < 10000) & ~np.isnan(flux)
    g = interpolate.interp1d(wl[m], flux[m], bounds_error=False, fill_value=np.nan)
    flux = g(wl)
    # Integrate flux and propagate errors within the line window.
    mask = (wl > convert_air_to_vacuum(21144) - 20) & (wl < convert_air_to_vacuum(21144) + 20)
    F_nii = np.trapz(flux[mask])
    F_nii_err = np.sqrt(np.trapz((error**2.)[mask]))
    print("Total flux: %0.1e +- %0.1e" % (F_nii, F_nii_err))
    # flux[flux_sky > 10000] = np.nan
    pl.plot(wl, flux, label = "Spectrum")
    pl.plot(wl[mask], flux[mask], label = "Integration limits")
    pl.plot(wl, flux_sky*1e-22, label = "Sky spectrum")
    pl.errorbar(wl, flux, yerr=error, fmt=".k", capsize=0, elinewidth=0.5, ms=3, label=r"f$_{[NII]}$ = %0.1e +- %0.1e" % (F_nii, F_nii_err))
    pl.xlim(21100, 21200)
    pl.ylim(-0.5e-17, 3e-17)
    # pl.show()
    # Save figure for tex
    pl.legend()
    pl.savefig("../figures/nii_flux.pdf", dpi="figure")
    pl.show()
# Run the flux measurement when executed as a script.
if __name__ == '__main__':
    main()
|
mit
|
stephenbalaban/keras
|
examples/addition_rnn.py
|
50
|
5900
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from keras.models import Sequential, slice_X
from keras.layers.core import Activation, Dense, RepeatVector
from keras.layers import recurrent
from sklearn.utils import shuffle
import numpy as np
"""
An implementation of sequence to sequence learning for performing addition
Input: "535+61"
Output: "596"
Padding is handled by using a repeated sentinel character (space)
By default, the JZS1 recurrent neural network is used
JZS1 was an "evolved" recurrent neural network performing well on arithmetic benchmark in:
"An Empirical Exploration of Recurrent Network Architectures"
http://jmlr.org/proceedings/papers/v37/jozefowicz15.pdf
Input may optionally be inverted, shown to increase performance in many tasks in:
"Learning to Execute"
http://arxiv.org/abs/1410.4615
and
"Sequence to Sequence Learning with Neural Networks"
http://papers.nips.cc/paper/5346-sequence-to-sequence-learning-with-neural-networks.pdf
Theoretically it introduces shorter term dependencies between source and target.
Two digits inverted:
+ One layer JZS1 (128 HN), 5k training examples = 99% train/test accuracy in 55 epochs
Three digits inverted:
+ One layer JZS1 (128 HN), 50k training examples = 99% train/test accuracy in 100 epochs
Four digits inverted:
+ One layer JZS1 (128 HN), 400k training examples = 99% train/test accuracy in 20 epochs
Five digits inverted:
+ One layer JZS1 (128 HN), 550k training examples = 99% train/test accuracy in 30 epochs
"""
class CharacterTable(object):
    """Encode/decode between characters and one-hot representations.

    Given a set of characters:
    + Encode them to a one hot integer representation
    + Decode the one hot integer representation to their character output
    + Decode a vector of probabilties to their character output
    """
    def __init__(self, chars, maxlen):
        # Deduplicate and order the alphabet so encoding is deterministic.
        self.chars = sorted(set(chars))
        self.char_indices = {c: i for i, c in enumerate(self.chars)}
        self.indices_char = {i: c for i, c in enumerate(self.chars)}
        self.maxlen = maxlen

    def encode(self, C, maxlen=None):
        """One-hot encode string C into a (maxlen, alphabet_size) matrix."""
        if not maxlen:
            maxlen = self.maxlen
        X = np.zeros((maxlen, len(self.chars)))
        for position, char in enumerate(C):
            X[position, self.char_indices[char]] = 1
        return X

    def decode(self, X, calc_argmax=True):
        """Map a one-hot (or probability) matrix back to a string."""
        if calc_argmax:
            X = X.argmax(axis=-1)
        return ''.join(self.indices_char[index] for index in X)
class colors:
    # ANSI terminal escape codes used to colorize the validation output below.
    ok = '\033[92m'  # green
    fail = '\033[91m'  # red
    close = '\033[0m'  # reset
# Parameters for the model and dataset
# NOTE: this script uses `xrange` and integer `/`, so it targets Python 2.
TRAINING_SIZE = 50000
DIGITS = 3
INVERT = True
# Try replacing JZS1 with LSTM, GRU, or SimpleRNN
RNN = recurrent.JZS1
HIDDEN_SIZE = 128
BATCH_SIZE = 128
LAYERS = 1
# Longest possible question, e.g. "999+999".
MAXLEN = DIGITS + 1 + DIGITS
chars = '0123456789+ '
ctable = CharacterTable(chars, MAXLEN)
questions = []
expected = []
seen = set()
print('Generating data...')
while len(questions) < TRAINING_SIZE:
    # f() draws a random integer with 1..DIGITS digits.
    f = lambda: int(''.join(np.random.choice(list('0123456789')) for i in xrange(np.random.randint(1, DIGITS + 1))))
    a, b = f(), f()
    # Skip any addition questions we've already seen
    # Also skip any such that X+Y == Y+X (hence the sorting)
    key = tuple(sorted((a, b)))
    if key in seen:
        continue
    seen.add(key)
    # Pad the data with spaces such that it is always MAXLEN
    q = '{}+{}'.format(a, b)
    query = q + ' ' * (MAXLEN - len(q))
    ans = str(a + b)
    # Answers can be of maximum size DIGITS + 1
    ans += ' ' * (DIGITS + 1 - len(ans))
    if INVERT:
        query = query[::-1]
    questions.append(query)
    expected.append(ans)
print('Total addition questions:', len(questions))
print('Vectorization...')
X = np.zeros((len(questions), MAXLEN, len(chars)), dtype=np.bool)
y = np.zeros((len(questions), DIGITS + 1, len(chars)), dtype=np.bool)
for i, sentence in enumerate(questions):
    X[i] = ctable.encode(sentence, maxlen=MAXLEN)
for i, sentence in enumerate(expected):
    y[i] = ctable.encode(sentence, maxlen=DIGITS + 1)
# Shuffle (X, y) in unison as the later parts of X will almost all be larger digits
X, y = shuffle(X, y)
# Explicitly set apart 10% for validation data that we never train over
split_at = len(X) - len(X) / 10
(X_train, X_val) = (slice_X(X, 0, split_at), slice_X(X, split_at))
(y_train, y_val) = (y[:split_at], y[split_at:])
print('Build model...')
model = Sequential()
# "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE
model.add(RNN(len(chars), HIDDEN_SIZE))
# For the decoder's input, we repeat the encoded input for each time step
model.add(RepeatVector(DIGITS + 1))
# The decoder RNN could be multiple layers stacked or a single layer
for _ in xrange(LAYERS):
    model.add(RNN(HIDDEN_SIZE, HIDDEN_SIZE, return_sequences=True))
# For each of step of the output sequence, decide which character should be chosen
model.add(Dense(HIDDEN_SIZE, len(chars)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# Train the model each generation and show predictions against the validation dataset
for iteration in range(1, 200):
    print()
    print('-' * 50)
    print('Iteration', iteration)
    # BUG FIX: train only on the training split. The original called
    # `model.fit(X, y, ...)`, training on the full dataset -- including the
    # rows held out in (X_val, y_val) -- so the reported validation accuracy
    # was inflated by data leakage.
    model.fit(X_train, y_train, batch_size=BATCH_SIZE, nb_epoch=1,
              validation_data=(X_val, y_val), show_accuracy=True)
    ###
    # Select 10 samples from the validation set at random so we can visualize errors
    for i in xrange(10):
        ind = np.random.randint(0, len(X_val))
        rowX, rowy = X_val[np.array([ind])], y_val[np.array([ind])]
        preds = model.predict_classes(rowX, verbose=0)
        q = ctable.decode(rowX[0])
        correct = ctable.decode(rowy[0])
        guess = ctable.decode(preds[0], calc_argmax=False)
        print('Q', q[::-1] if INVERT else q)
        print('T', correct)
        print(colors.ok + '☑' + colors.close if correct == guess else colors.fail + '☒' + colors.close, guess)
        print('---')
|
mit
|
willingc/oh-mainline
|
vendor/packages/mechanize/test/test_performance.py
|
22
|
2573
|
import os
import time
import sys
import unittest
import mechanize
from mechanize._testcase import TestCase, TempDirMaker
from mechanize._rfc3986 import urljoin
KB = 1024
MB = 1024**2
GB = 1024**3
def time_it(operation):
    """Return the wall-clock seconds spent calling `operation()`."""
    started = time.time()
    operation()
    return time.time() - started
def write_data(filename, nr_bytes):
    """Write roughly `nr_bytes` bytes of dummy data to `filename`.

    Data is written in whole 4096-byte blocks, so the file ends up
    `(nr_bytes // 4096) * 4096` bytes long (0 bytes if nr_bytes < 4096).
    """
    block_size = 4096
    block = "01234567" * (block_size // 8)
    # `with` guarantees the handle is closed even if a write fails
    # (replaces the original try/finally pair).
    with open(filename, "w") as fh:
        for _ in range(nr_bytes // block_size):
            fh.write(block)
def time_retrieve_local_file(temp_maker, size, retrieve_fn):
    """Write a `size`-byte local file and time retrieving it with `retrieve_fn`."""
    temp_dir = temp_maker.make_temp_dir()
    source_path = os.path.join(temp_dir, "data")
    write_data(source_path, size)
    target_path = os.path.join(temp_dir, "retrieved")

    def operation():
        retrieve_fn(urljoin("file://", source_path), target_path)

    return time_it(operation)
class PerformanceTests(TestCase):
    def test_retrieve_local_file(self):
        """Retrieving a 100 MB local file should sustain at least half the
        desired 2 MB/s rate (a 2x fudge factor on the time budget)."""
        def retrieve(url, filename):
            br = mechanize.Browser()
            br.retrieve(url, filename)
        size = 100 * MB
        # size = 1 * KB
        desired_rate = 2*MB # per second
        desired_time = size / float(desired_rate)
        fudge_factor = 2.
        self.assert_less_than(
            time_retrieve_local_file(self, size, retrieve),
            desired_time * fudge_factor)
def show_plot(rows):
    """Plot (x, y) rows as a single line and display the figure."""
    # Imported lazily so the test suite does not require matplotlib.
    import matplotlib.pyplot
    figure = matplotlib.pyplot.figure()
    axes = figure.add_subplot(111)
    xs = [row[0] for row in rows]
    ys = [row[1] for row in rows]
    axes.plot(xs, ys)
    matplotlib.pyplot.show()
def power_2_range(start, stop):
    """Yield start, 2*start, 4*start, ... up to and including stop."""
    value = start
    while value <= stop:
        yield value
        value *= 2
def performance_plot():
    """Time mechanize retrieval of local files from 256 KB to 256 MB
    (doubling each step) and plot elapsed seconds against size in MB."""
    def retrieve(url, filename):
        br = mechanize.Browser()
        br.retrieve(url, filename)
    # import urllib2
    # def retrieve(url, filename):
    # urllib2.urlopen(url).read()
    # from mechanize import _useragent
    # ua = _useragent.UserAgent()
    # ua.set_seekable_responses(True)
    # ua.set_handle_equiv(False)
    # def retrieve(url, filename):
    # ua.retrieve(url, filename)
    rows = []
    for size in power_2_range(256 * KB, 256 * MB):
        # A fresh temp dir per size; always cleaned up, even on failure.
        temp_maker = TempDirMaker()
        try:
            elapsed = time_retrieve_local_file(temp_maker, size, retrieve)
        finally:
            temp_maker.tear_down()
        rows.append((size//float(MB), elapsed))
    show_plot(rows)
if __name__ == "__main__":
args = sys.argv[1:]
if "--plot" in args:
performance_plot()
else:
unittest.main()
|
agpl-3.0
|
jamesmishra/nlp-playground
|
nlp_playground/scripts/newsgroup_classifier.py
|
1
|
2582
|
"""Classification experiments with 20newsgroups."""
import numpy as np
from dask_searchcv import GridSearchCV
from hyperdash import monitor
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
from nlp_playground.data import glove_simple
from nlp_playground.lib.sklearn.gridsearch import print_best_worst
from nlp_playground.lib.sklearn.transformers import WordVectorSum
# Newsgroup subset for quicker experiments; currently unused because the
# `categories=CATEGORIES` arguments in main() are commented out.
CATEGORIES = ['alt.atheism', 'soc.religion.christian',
              'comp.graphics', 'sci.med']
@monitor(__file__)
def main():
    """
    Train a classifier on the 20 newsgroups dataset.
    The purpose of this is mostly trying to figure out how
    to turn text into really good vector representations
    for classification... which are also hopefully good
    vector representations for unsupervised learning too.

    Pipeline: summed GloVe word vectors -> SGDClassifier, wrapped in a
    (currently parameter-less) dask GridSearchCV; prints test accuracy,
    best params and a per-class classification report.
    """
    # We don't really use our interfaces for iterating over datasets...
    # but maybe we will in the future.
    train = fetch_20newsgroups(
        subset='train',
        # categories=CATEGORIES,
        shuffle=True
    )
    test = fetch_20newsgroups(
        subset='test',
        # categories=CATEGORIES,
        shuffle=True
    )
    print("Loaded data.", len(set(train.target)), "classes.")
    glove_vectors = glove_simple()
    print("Loaded word vectors")
    pipeline = Pipeline([
        # ('vec', TfidfVectorizer()),
        ('vec', WordVectorSum(vector_dict=glove_vectors)),
        # ('svd', TruncatedSVD()),
        ('fit', SGDClassifier())
    ])
    print("Defined pipeline. Beginning fit.")
    # Empty param grid: the grid search currently fits a single candidate.
    gridsearch = GridSearchCV(
        pipeline,
        {
            # 'vec__stop_words': ('english',),
            # 'svd__n_components': (2, 100, 500, 1000),
            # 'vec__min_df': (1, 0.01, 0.1, 0.4),
            # 'vec__max_df': (0.5, 0.75, 0.9, 1.0),
            # 'vec__max_features': (100, 1000, 10000)
        }
    )
    gridsearch.fit(train.data, train.target)
    print("Completed fit. Beginning prediction")
    predicted = gridsearch.predict(test.data)
    print("Completed prediction.")
    accuracy = np.mean(predicted == test.target)
    print("Accuracy was", accuracy)
    print("Best params", gridsearch.best_params_)
    print_best_worst(gridsearch.cv_results_)
    print(
        classification_report(
            test.target,
            predicted,
            target_names=test.target_names))
|
mit
|
mjschust/conformal-blocks
|
experiments/triviality.py
|
1
|
2057
|
'''
Created on Nov 21, 2016
@author: mjschust
'''
from __future__ import division
import conformal_blocks.cbbundle as cbd
import math, cProfile, time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#Computes all non-trivial 4-point conformal blocks divisors of specified Lie rank and level.
#Computes all non-trivial 4-point conformal blocks divisors of specified Lie rank and level.
def experiment():
    """For type-A rank 3, level 10, compute the symmetrized divisor of every
    4-point symmetric conformal-blocks bundle of nonzero rank and scatter-plot
    the weights, colored by whether the divisor is trivial."""
    rank = 3
    level = 10
    liealg = cbd.TypeALieAlgebra(rank, store_fusion=True, exact=False)
    print("Weight", "Rank", "Divisor")
    # Weight coordinates split by divisor triviality for plotting.
    trivial_x = []
    trivial_y = []
    trivial_z = []
    nontrivial_x = []
    nontrivial_y = []
    nontrivial_z = []
    for wt in liealg.get_weights(level):
        cbb = cbd.SymmetricConformalBlocksBundle(liealg, wt, 4, level)
        # Rank-zero bundles carry no divisor information.
        if cbb.get_rank() == 0: continue
        #tot_weight = wt.fund_coords[0] + 2*wt.fund_coords[1] + 3*wt.fund_coords[2]
        #if tot_weight <= level: continue
        #tot_weight = 3*wt.fund_coords[0] + 2*wt.fund_coords[1] + wt.fund_coords[2]
        #if tot_weight <= level: continue
        #if level >= tot_weight // (r+1): continue
        divisor = cbb.get_symmetrized_divisor()
        if divisor[0] == 0:
            trivial_x.append(wt[0])
            trivial_y.append(wt[2])
            trivial_z.append(wt[1])
        else:
            nontrivial_x.append(wt[0])
            nontrivial_y.append(wt[2])
            nontrivial_z.append(wt[1])
        print(wt, cbb.get_rank(), divisor[0])
    # Plot the results
    #fig, ax = plt.subplots()
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(trivial_x, trivial_y, zs=trivial_z, c='black', label="Trivial divisor")
    ax.scatter(nontrivial_x, nontrivial_y, zs=nontrivial_z, c='red', label="Non-trivial divisor")
    ax.set_xlabel('a_1')
    ax.set_ylabel('a_3')
    ax.set_zlabel('a_2')
    ax.legend()
    ax.grid(True)
    plt.show()
if __name__ == '__main__':
    # Timing/profiling scaffolding left commented out for convenience.
    #t0 = time.clock()
    experiment()
    #print(time.clock() -t0)
    #cProfile.run('experiment()', sort='cumtime')
|
mit
|
tomsilver/NAB
|
nab/runner.py
|
1
|
9309
|
# ----------------------------------------------------------------------
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import multiprocessing
import os
import pandas
try:
import simplejson as json
except ImportError:
import json
from nab.corpus import Corpus
from nab.detectors.base import detectDataSet
from nab.labeler import CorpusLabel
from nab.optimizer import optimizeThreshold
from nab.scorer import scoreCorpus
from nab.util import updateThresholds
class Runner(object):
  """
  Class to run an endpoint (detect, optimize, or score) on the NAB
  benchmark using the specified set of profiles, thresholds, and/or detectors.

  NOTE: this module uses Python 2 syntax (print statements, dict.iteritems).
  """

  def __init__(self,
               dataDir,
               resultsDir,
               labelPath,
               profilesPath,
               thresholdPath,
               numCPUs=None):
    """
    @param dataDir        (string)  Directory where all the raw datasets exist.

    @param resultsDir     (string)  Directory where the detector anomaly scores
                                    will be scored.

    @param labelPath      (string)  Path where the labels of the datasets
                                    exist.

    @param profilesPath   (string)  Path to JSON file containing application
                                    profiles and associated cost matrices.

    @param thresholdPath  (string)  Path to thresholds dictionary containing the
                                    best thresholds (and their corresponding
                                    score) for a combination of detector and
                                    user profile.

    @param numCPUs        (int)     Number of CPUs to be used for calls to
                                    multiprocessing.pool.map
    """
    self.dataDir = dataDir
    self.resultsDir = resultsDir
    self.labelPath = labelPath
    self.profilesPath = profilesPath
    self.thresholdPath = thresholdPath
    self.pool = multiprocessing.Pool(numCPUs)

    # Fraction of each dataset ignored during scoring; anomaly-window width.
    self.probationaryPercent = 0.15
    self.windowSize = 0.10

    # Populated by initialize().
    self.corpus = None
    self.corpusLabel = None
    self.profiles = None

  def initialize(self):
    """Initialize all the relevant objects for the run."""
    self.corpus = Corpus(self.dataDir)
    self.corpusLabel = CorpusLabel(path=self.labelPath, corpus=self.corpus)

    with open(self.profilesPath) as p:
      self.profiles = json.load(p)

  def detect(self, detectors):
    """Generate results file given a dictionary of detector classes

    Function that takes a set of detectors and a corpus of data and creates a
    set of files storing the alerts and anomaly scores given by the detectors

    @param detectors     (dict)         Dictionary with key value pairs of a
                                        detector name and its corresponding
                                        class constructor.
    """
    print "\nRunning detection step"

    count = 0
    args = []
    # One task per (detector, data file) pair, fanned out over the pool.
    for detectorName, detectorConstructor in detectors.iteritems():
      for relativePath, dataSet in self.corpus.dataFiles.iteritems():

        args.append(
          (
            count,
            detectorConstructor(
              dataSet=dataSet,
              probationaryPercent=self.probationaryPercent),
            detectorName,
            self.corpusLabel.labels[relativePath]["label"],
            self.resultsDir,
            relativePath
          )
        )

        count += 1

    self.pool.map(detectDataSet, args)

  def optimize(self, detectorNames):
    """Optimize the threshold for each combination of detector and profile.

    @param detectorNames  (list)  List of detector names.

    @return thresholds    (dict)  Dictionary of dictionaries with detector names
                                  then profile names as keys followed by another
                                  dictionary containing the score and the
                                  threshold used to obtained that score.
    """
    print "\nRunning optimize step"

    scoreFlag = False
    thresholds = {}

    for detectorName in detectorNames:
      resultsDetectorDir = os.path.join(self.resultsDir, detectorName)
      resultsCorpus = Corpus(resultsDetectorDir)

      thresholds[detectorName] = {}

      for profileName, profile in self.profiles.iteritems():
        thresholds[detectorName][profileName] = optimizeThreshold(
          (self.pool,
           detectorName,
           profileName,
           profile["CostMatrix"],
           resultsDetectorDir,
           resultsCorpus,
           self.corpusLabel,
           self.probationaryPercent,
           scoreFlag))

    # Persist the optimized thresholds for the scoring step.
    updateThresholds(thresholds, self.thresholdPath)

    return thresholds

  def score(self, detectorNames, thresholds):
    """Score the performance of the detectors.

    Function that must be called only after detection result files have been
    generated and thresholds have been optimized. This looks at the result files
    and scores the performance of each detector specified and stores these
    results in a csv file.

    @param detectorNames  (list)    List of detector names.

    @param thresholds     (dict)    Dictionary of dictionaries with detector
                                    names then profile names as keys followed by
                                    another dictionary containing the score and
                                    the threshold used to obtained that score.
    """
    print "\nRunning scoring step"

    scoreFlag = True
    # NOTE(review): this local `baselines` is never used in this method
    # (normalize() builds its own) -- candidate for removal.
    baselines = {}

    self.resultsFiles = []
    for detectorName in detectorNames:
      resultsDetectorDir = os.path.join(self.resultsDir, detectorName)
      resultsCorpus = Corpus(resultsDetectorDir)

      for profileName, profile in self.profiles.iteritems():
        threshold = thresholds[detectorName][profileName]["threshold"]

        resultsDF = scoreCorpus(threshold,
                                (self.pool,
                                 detectorName,
                                 profileName,
                                 profile["CostMatrix"],
                                 resultsDetectorDir,
                                 resultsCorpus,
                                 self.corpusLabel,
                                 self.probationaryPercent,
                                 scoreFlag))

        scorePath = os.path.join(resultsDetectorDir, "%s_%s_scores.csv" %\
          (detectorName, profileName))
        resultsDF.to_csv(scorePath, index=False)
        print "%s detector benchmark scores written to %s" %\
          (detectorName, scorePath)
        self.resultsFiles.append(scorePath)

  def normalize(self):
    """Normalize the detectors' scores according to the Baseline, and print to
    the console.

    Function can only be called with the scoring step (i.e. runner.score())
    preceding it.

    This reads the total score values from the results CSVs, and
    adds the relevant baseline value. The scores are then normalized by
    multiplying by 100/perfect, where the perfect score is the number of TPs
    possible (i.e. 44.0).

    Note the results CSVs still contain the original scores, not normalized.
    """
    print "\nRunning score normalization step"

    # Get baselines for each application profile.
    baselineDir = os.path.join(self.resultsDir, "baseline")
    if not os.path.isdir(baselineDir):
      raise IOError("No results directory for baseline. You must "
                    "run the baseline detector before normalizing scores.")

    baselines = {}
    for profileName, _ in self.profiles.iteritems():
      fileName = os.path.join(baselineDir,
                              "baseline_" + profileName + "_scores.csv")
      with open(fileName) as f:
        results = pandas.read_csv(f)
        # The final row's "Score" column holds the run's total score.
        baselines[profileName] = results["Score"].iloc[-1]

    # Normalize the score from each results file.
    for resultsFile in self.resultsFiles:
      # Recover this file's profile by substring match against its path.
      profileName = [k for k in baselines.keys() if k in resultsFile][0]
      base = baselines[profileName]
      with open(resultsFile) as f:
        results = pandas.read_csv(f)
        perfect = 44.0 - base
        score = (-base + results["Score"].iloc[-1]) * (100/perfect)
        print ("Final score for \'%s\' = %.2f"
               % (resultsFile.split('/')[-1][:-4], score))
|
gpl-3.0
|
ruohoruotsi/librosa
|
librosa/core/spectrum.py
|
1
|
36276
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Utilities for spectral processing'''
import numpy as np
import scipy.fftpack as fft
import scipy
import scipy.signal
import scipy.interpolate
import six
from . import time_frequency
from .. import cache
from .. import util
from ..util.decorators import moved
from ..util.deprecation import rename_kw, Deprecated
from ..util.exceptions import ParameterError
from ..filters import get_window
# Public API of this module; keep in sync with the definitions below.
__all__ = ['stft', 'istft', 'magphase',
           'ifgram', 'phase_vocoder',
           'logamplitude', 'perceptual_weighting',
           'power_to_db', 'db_to_power',
           'amplitude_to_db', 'db_to_amplitude',
           'fmt']
@cache(level=20)
def stft(y, n_fft=2048, hop_length=None, win_length=None, window='hann',
         center=True, dtype=np.complex64):
    """Compute the short-time Fourier transform (STFT) of `y`.

    The result is a complex matrix `D` where `np.abs(D[f, t])` gives the
    magnitude and `np.angle(D[f, t])` the phase of frequency bin `f` at
    frame `t`.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)], real-valued
        the input signal (audio time series)

    n_fft : int > 0 [scalar]
        FFT window size

    hop_length : int > 0 [scalar]
        number of audio samples between STFT columns.
        If unspecified, defaults to `win_length / 4`.

    win_length : int <= n_fft [scalar]
        Each frame of audio is windowed by a window of length
        `win_length`, then zero-padded to match `n_fft`.
        If unspecified, defaults to ``win_length = n_fft``.

    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        - a window specification (string, tuple, or number);
          see `scipy.signal.get_window`
        - a window function, such as `scipy.signal.hanning`
        - a vector or array of length `n_fft`

        .. see also:: `filters.get_window`

    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `D[:, t]` is centered at `y[t * hop_length]`.
        - If `False`, then `D[:, t]` begins at `y[t * hop_length]`

    dtype : numeric type
        Complex numeric type for `D`.  Default is 64-bit complex.

    Returns
    -------
    D : np.ndarray [shape=(1 + n_fft/2, t), dtype=dtype]
        STFT matrix

    See Also
    --------
    istft : Inverse STFT
    ifgram : Instantaneous frequency spectrogram

    Notes
    -----
    This function caches at level 20.
    """
    # Fall back to a full-frame window when no length is given
    if win_length is None:
        win_length = n_fft

    # Default hop: one quarter of the window
    if hop_length is None:
        hop_length = int(win_length // 4)

    # Build the analysis window, zero-pad it out to n_fft, and give it
    # a trailing singleton axis so it broadcasts across frames
    fft_win = util.pad_center(get_window(window, win_length, fftbins=True),
                              n_fft)
    fft_win = fft_win.reshape((-1, 1))

    if center:
        # Reflect-pad so that frames are centered on their timestamps
        util.valid_audio(y)
        y = np.pad(y, int(n_fft // 2), mode='reflect')

    # Slice the signal into overlapping frames, one frame per column
    frames = util.frame(y, frame_length=n_fft, hop_length=hop_length)

    # Pre-allocate the output, column-major for the blockwise writes below
    out = np.empty((int(1 + n_fft // 2), frames.shape[1]),
                   dtype=dtype,
                   order='F')

    # Process as many columns per iteration as fit within MAX_MEM_BLOCK
    block_cols = int(util.MAX_MEM_BLOCK / (out.shape[0] * out.itemsize))

    for start in range(0, out.shape[1], block_cols):
        stop = min(start + block_cols, out.shape[1])

        # Conjugate after the FFT to match the phase convention of the
        # original DPWE implementation
        out[:, start:stop] = fft.fft(fft_win * frames[:, start:stop],
                                     axis=0)[:out.shape[0]].conj()

    return out
@cache(level=30)
def istft(stft_matrix, hop_length=None, win_length=None, window='hann',
          center=True, dtype=np.float32):
    """Inverse short-time Fourier transform (ISTFT).

    Converts a complex-valued spectrogram `stft_matrix` to time-series `y`
    by minimizing the mean squared error between `stft_matrix` and the
    STFT of `y`, as described in [1]_.

    In general, the window function, hop length and other parameters
    should match those used in `stft`, which mostly leads to perfect
    reconstruction of a signal from an unmodified `stft_matrix`.

    .. [1] D. W. Griffin and J. S. Lim,
        "Signal estimation from modified short-time Fourier transform,"
        IEEE Trans. ASSP, vol.32, no.2, pp.236-243, Apr. 1984.

    Parameters
    ----------
    stft_matrix : np.ndarray [shape=(1 + n_fft/2, t)]
        STFT matrix from `stft`

    hop_length : int > 0 [scalar]
        Number of frames between STFT columns.
        If unspecified, defaults to `win_length / 4`.

    win_length : int <= n_fft = 2 * (stft_matrix.shape[0] - 1)
        When reconstructing the time series, each frame is windowed
        and each sample is normalized by the sum of squared window
        according to the `window` function (see below).
        If unspecified, defaults to `n_fft`.

    window : string, tuple, number, function, np.ndarray [shape=(n_fft,)]
        - a window specification (string, tuple, or number);
          see `scipy.signal.get_window`
        - a window function, such as `scipy.signal.hanning`
        - a user-specified window vector of length `n_fft`

        .. see also:: `filters.get_window`

    center : boolean
        - If `True`, `D` is assumed to have centered frames.
        - If `False`, `D` is assumed to have left-aligned frames.

    dtype : numeric type
        Real numeric type for `y`.  Default is 32-bit float.

    Returns
    -------
    y : np.ndarray [shape=(n,)]
        time domain signal reconstructed from `stft_matrix`

    See Also
    --------
    stft : Short-time Fourier Transform

    Notes
    -----
    This function caches at level 30.
    """
    n_fft = 2 * (stft_matrix.shape[0] - 1)

    # Default to a full-length synthesis window
    if win_length is None:
        win_length = n_fft

    # Default hop: one quarter of the window
    if hop_length is None:
        hop_length = int(win_length // 4)

    # Build the synthesis window and zero-pad it to the FFT length
    synth_win = util.pad_center(get_window(window, win_length, fftbins=True),
                                n_fft)
    win_sq = synth_win * synth_win

    n_frames = stft_matrix.shape[1]

    # Total output length implied by the frame count and hop
    out_len = n_fft + hop_length * (n_frames - 1)
    y = np.zeros(out_len, dtype=dtype)
    win_accum = np.zeros(out_len, dtype=dtype)

    for frame in range(n_frames):
        start = frame * hop_length

        # Rebuild the full (conjugate-symmetric) spectrum from the
        # non-negative-frequency half stored in stft_matrix
        half = stft_matrix[:, frame].flatten()
        full = np.concatenate((half.conj(), half[-2:0:-1]), 0)

        # Inverse transform, window, and overlap-add into the output
        segment = synth_win * fft.ifft(full).real
        y[start:(start + n_fft)] = y[start:(start + n_fft)] + segment
        win_accum[start:(start + n_fft)] += win_sq

    # Normalize by the accumulated squared window, skipping samples
    # where the accumulator is numerically zero
    nonzero = win_accum > util.tiny(win_accum)
    y[nonzero] /= win_accum[nonzero]

    if center:
        # Trim off the reflection padding added by stft
        y = y[int(n_fft // 2):-int(n_fft // 2)]

    return y
def ifgram(y, sr=22050, n_fft=2048, hop_length=None, win_length=None,
           window='hann', norm=False, center=True, ref_power=1e-6,
           clip=True, dtype=np.complex64):
    '''Compute the instantaneous frequency (as a proportion of the sampling rate)
    obtained as the time-derivative of the phase of the complex spectrum as
    described by [1]_.

    Calculates regular STFT as a side effect.

    .. [1] Abe, Toshihiko, Takao Kobayashi, and Satoshi Imai.
        "Harmonics tracking and pitch extraction based on instantaneous
        frequency."
        International Conference on Acoustics, Speech, and Signal Processing,
        ICASSP-95., Vol. 1. IEEE, 1995.

    Parameters
    ----------
    y : np.ndarray [shape=(n,)]
        audio time series

    sr : number > 0 [scalar]
        sampling rate of `y`

    n_fft : int > 0 [scalar]
        FFT window size

    hop_length : int > 0 [scalar]
        hop length, number samples between subsequent frames.
        If not supplied, defaults to `win_length / 4`.

    win_length : int > 0, <= n_fft
        Window length. Defaults to `n_fft`.
        See `stft` for details.

    window : string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
        - a window specification (string, tuple, number);
          see `scipy.signal.get_window`
        - a window function, such as `scipy.signal.hanning`
        - a user-specified window vector of length `n_fft`
        See `stft` for details.

        .. see also:: `filters.get_window`

    norm : bool
        Normalize the STFT.

    center : boolean
        - If `True`, the signal `y` is padded so that frame
          `D[:, t]` (and `if_gram`) is centered at `y[t * hop_length]`.
        - If `False`, then `D[:, t]` at `y[t * hop_length]`

    ref_power : float >= 0 or callable
        Minimum power threshold for estimating instantaneous frequency.
        Any bin with `np.abs(D[f, t])**2 < ref_power` will receive the
        default frequency estimate.

        If callable, the threshold is set to `ref_power(np.abs(D)**2)`.

    clip : boolean
        - If `True`, clip estimated frequencies to the range `[0, 0.5 * sr]`.
        - If `False`, estimated frequencies can be negative or exceed
          `0.5 * sr`.

    dtype : numeric type
        Complex numeric type for `D`.  Default is 64-bit complex.

    Returns
    -------
    if_gram : np.ndarray [shape=(1 + n_fft/2, t), dtype=real]
        Instantaneous frequency spectrogram:
        `if_gram[f, t]` is the frequency at bin `f`, time `t`

    D : np.ndarray [shape=(1 + n_fft/2, t), dtype=complex]
        Short-time Fourier transform

    See Also
    --------
    stft : Short-time Fourier Transform

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> frequencies, D = librosa.ifgram(y, sr=sr)
    >>> frequencies
    array([[  0.000e+00,   0.000e+00, ...,   0.000e+00,   0.000e+00],
           [  3.150e+01,   3.070e+01, ...,   1.077e+01,   1.077e+01],
           ...,
           [  1.101e+04,   1.101e+04, ...,   1.101e+04,   1.101e+04],
           [  1.102e+04,   1.102e+04, ...,   1.102e+04,   1.102e+04]])
    '''
    if win_length is None:
        win_length = n_fft

    if hop_length is None:
        hop_length = int(win_length // 4)

    # Construct a padded hann window
    fft_window = util.pad_center(get_window(window, win_length,
                                            fftbins=True),
                                 n_fft)

    # Window for discrete differentiation
    freq_angular = np.linspace(0, 2 * np.pi, n_fft, endpoint=False)

    # NOTE(review): scaled sine ramp used as a differentiation window for
    # the phase-derivative estimate -- confirm against Abe et al. before
    # modifying.
    d_window = np.sin(-freq_angular) * np.pi / n_fft

    # Regular STFT with the user-specified analysis window
    stft_matrix = stft(y, n_fft=n_fft, hop_length=hop_length,
                       win_length=win_length,
                       window=window, center=center, dtype=dtype)

    # STFT taken with the differentiation window; note the conjugation,
    # which is consumed by the `phase * diff_stft` product below
    diff_stft = stft(y, n_fft=n_fft, hop_length=hop_length,
                     window=d_window, center=center, dtype=dtype).conj()

    # Compute power normalization. Suppress zeros.
    mag, phase = magphase(stft_matrix)

    if six.callable(ref_power):
        # Callable threshold: evaluate it on the power spectrogram
        ref_power = ref_power(mag**2)
    elif ref_power < 0:
        raise ParameterError('ref_power must be non-negative or callable.')

    # Pylint does not correctly infer the type here, but it's correct.
    # pylint: disable=maybe-no-member
    freq_angular = freq_angular.reshape((-1, 1))

    # Per-bin frequency offset (angular units) estimated from the phase
    # derivative, normalized by the bin magnitude
    bin_offset = (phase * diff_stft).imag / mag

    # Bins below the power threshold keep the default bin-center estimate
    bin_offset[mag < ref_power**0.5] = 0

    # Nominal bin-center frequencies plus the estimated offsets
    if_gram = freq_angular[:n_fft//2 + 1] + bin_offset

    if norm:
        stft_matrix = stft_matrix * 2.0 / fft_window.sum()

    if clip:
        np.clip(if_gram, 0, np.pi, out=if_gram)

    # Convert from angular units to Hz
    if_gram *= float(sr) * 0.5 / np.pi

    return if_gram, stft_matrix
def magphase(D):
    """Decompose a complex-valued spectrogram into magnitude and phase.

    Given `D`, returns the pair `(S, P)` with `S = np.abs(D)` and
    `P = exp(1.j * np.angle(D))`, so that `D = S * P`.

    Parameters
    ----------
    D : np.ndarray [shape=(d, t), dtype=complex]
        complex-valued spectrogram

    Returns
    -------
    D_mag : np.ndarray [shape=(d, t), dtype=real]
        magnitude of `D`
    D_phase : np.ndarray [shape=(d, t), dtype=complex]
        `exp(1.j * phi)` where `phi` is the phase of `D`

    Examples
    --------
    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> D = librosa.stft(y)
    >>> magnitude, phase = librosa.magphase(D)
    >>> np.allclose(magnitude * phase, D)
    True

    The phase angle in radians is recovered with `np.angle(phase)`.
    """
    # Phase is encoded as a unit-modulus complex number
    angles = np.angle(D)
    return np.abs(D), np.exp(1.j * angles)
def phase_vocoder(D, rate, hop_length=None):
    """Phase vocoder.  Given an STFT matrix D, speed up by a factor of `rate`

    Based on the implementation provided by [1]_.

    .. [1] Ellis, D. P. W. "A phase vocoder in Matlab."
        Columbia University, 2002.
        http://www.ee.columbia.edu/~dpwe/resources/matlab/pvoc/

    Examples
    --------
    >>> # Play at double speed
    >>> y, sr   = librosa.load(librosa.util.example_audio_file())
    >>> D       = librosa.stft(y, n_fft=2048, hop_length=512)
    >>> D_fast  = librosa.phase_vocoder(D, 2.0, hop_length=512)
    >>> y_fast  = librosa.istft(D_fast, hop_length=512)

    >>> # Or play at 1/3 speed
    >>> y, sr   = librosa.load(librosa.util.example_audio_file())
    >>> D       = librosa.stft(y, n_fft=2048, hop_length=512)
    >>> D_slow  = librosa.phase_vocoder(D, 1./3, hop_length=512)
    >>> y_slow  = librosa.istft(D_slow, hop_length=512)

    Parameters
    ----------
    D : np.ndarray [shape=(d, t), dtype=complex]
        STFT matrix

    rate :  float > 0 [scalar]
        Speed-up factor: `rate > 1` is faster, `rate < 1` is slower.

    hop_length : int > 0 [scalar] or None
        The number of samples between successive columns of `D`.
        If None, defaults to `n_fft/4 = (D.shape[0]-1)/2`

    Returns
    -------
    D_stretched : np.ndarray [shape=(d, t / rate), dtype=complex]
        time-stretched STFT
    """
    n_fft = 2 * (D.shape[0] - 1)

    if hop_length is None:
        hop_length = int(n_fft // 4)

    # Fractional frame positions to sample from the input.
    # NOTE: `np.float` was deprecated in NumPy 1.20 and removed in 1.24;
    # use the explicit np.float64 dtype instead.
    time_steps = np.arange(0, D.shape[1], rate, dtype=np.float64)

    # Create an empty output array
    d_stretch = np.zeros((D.shape[0], len(time_steps)), D.dtype, order='F')

    # Expected phase advance in each bin per hop
    phi_advance = np.linspace(0, np.pi * hop_length, D.shape[0])

    # Phase accumulator; initialize to the first sample
    phase_acc = np.angle(D[:, 0])

    # Pad 0 columns to simplify boundary logic
    D = np.pad(D, [(0, 0), (0, 2)], mode='constant')

    for (t, step) in enumerate(time_steps):

        # The two input columns bracketing the (fractional) position
        columns = D[:, int(step):int(step + 2)]

        # Weighting for linear magnitude interpolation
        alpha = np.mod(step, 1.0)
        mag = ((1.0 - alpha) * np.abs(columns[:, 0])
               + alpha * np.abs(columns[:, 1]))

        # Store to output array
        d_stretch[:, t] = mag * np.exp(1.j * phase_acc)

        # Compute phase advance
        dphase = (np.angle(columns[:, 1])
                  - np.angle(columns[:, 0])
                  - phi_advance)

        # Wrap to -pi:pi range
        dphase = dphase - 2.0 * np.pi * np.round(dphase / (2.0 * np.pi))

        # Accumulate phase
        phase_acc += phi_advance + dphase

    return d_stretch
@cache(level=30)
def power_to_db(S, ref=1.0, amin=1e-10, top_db=80.0, ref_power=Deprecated()):
    """Convert a power spectrogram (amplitude squared) to decibel (dB) units

    This computes the scaling ``10 * log10(S / ref)`` in a numerically
    stable way.

    Parameters
    ----------
    S : np.ndarray
        input power

    ref : scalar or callable
        If scalar, the amplitude `abs(S)` is scaled relative to `ref`:
        `10 * log10(S / ref)`.
        Zeros in the output correspond to positions where `S == ref`.

        If callable, the reference value is computed as `ref(S)`.

    amin : float > 0 [scalar]
        minimum threshold for `abs(S)` and `ref`

    top_db : float >= 0 [scalar]
        threshold the output at `top_db` below the peak:
        ``max(10 * log10(S)) - top_db``

    ref_power : scalar or callable
        .. warning:: This parameter name was deprecated in librosa 0.5.0.
            Use the `ref` parameter instead.
            The `ref_power` parameter will be removed in librosa 0.6.0.

    Returns
    -------
    S_db   : np.ndarray
        ``S_db ~= 10 * log10(S) - 10 * log10(ref)``

    See Also
    --------
    perceptual_weighting
    db_to_power
    amplitude_to_db
    db_to_amplitude

    Notes
    -----
    This function caches at level 30.
    """
    if amin <= 0:
        raise ParameterError('amin must be strictly positive')

    magnitude = np.abs(S)

    # Fold the deprecated `ref_power` keyword into `ref`
    # (deprecated 0.5, removed 0.6)
    ref = rename_kw('ref_power', ref_power,
                    'ref', ref,
                    '0.5', '0.6')

    # A callable reference is evaluated on the magnitudes
    ref_value = ref(magnitude) if six.callable(ref) else np.abs(ref)

    # Clamp both terms at `amin` before taking logs, for stability
    log_spec = 10.0 * np.log10(np.maximum(amin, magnitude))
    log_spec -= 10.0 * np.log10(np.maximum(amin, ref_value))

    if top_db is not None:
        if top_db < 0:
            raise ParameterError('top_db must be non-negative')

        # Limit the dynamic range to top_db below the peak
        log_spec = np.maximum(log_spec, log_spec.max() - top_db)

    return log_spec
# Deprecated alias: `logamplitude` was renamed to `power_to_db` in 0.5
# and is scheduled for removal in 0.6.
logamplitude = moved('librosa.logamplitude', '0.5', '0.6')(power_to_db)
@cache(level=30)
def db_to_power(S_db, ref=1.0):
    '''Invert `power_to_db`: map a dB-scaled spectrogram back to power.

    Computes `S ~= ref * 10.0**(S_db / 10)`.

    Parameters
    ----------
    S_db : np.ndarray
        dB-scaled spectrogram

    ref : number > 0
        Reference power: output will be scaled by this value

    Returns
    -------
    S : np.ndarray
        Power spectrogram

    Notes
    -----
    This function caches at level 30.
    '''
    # Undo the 10 * log10 scaling, then restore the reference level
    linear = np.power(10.0, 0.1 * S_db)
    return ref * linear
@cache(level=30)
def amplitude_to_db(S, ref=1.0, amin=1e-5, top_db=80.0):
    '''Convert an amplitude spectrogram to dB-scaled spectrogram.

    This is equivalent to ``power_to_db(S**2)``, but is provided for
    convenience.

    Parameters
    ----------
    S : np.ndarray
        input amplitude

    ref : scalar or callable
        If scalar, the amplitude `abs(S)` is scaled relative to `ref`:
        `20 * log10(S / ref)`.
        Zeros in the output correspond to positions where `S == ref`.

        If callable, the reference value is computed as `ref(S)`.

    amin : float > 0 [scalar]
        minimum threshold for `S` and `ref`

    top_db : float >= 0 [scalar]
        threshold the output at `top_db` below the peak:
        ``max(20 * log10(S)) - top_db``

    Returns
    -------
    S_db : np.ndarray
        ``S`` measured in dB

    See Also
    --------
    logamplitude, power_to_db, db_to_amplitude

    Notes
    -----
    This function caches at level 30.
    '''
    mag = np.abs(S)

    # Resolve the reference before squaring, so a callable `ref`
    # sees amplitudes rather than powers
    ref_value = ref(mag) if six.callable(ref) else np.abs(ref)

    # Square amplitude into power and delegate; squaring the reference
    # and `amin` keeps them expressed on the amplitude scale
    return power_to_db(np.square(mag), ref=ref_value**2, amin=amin**2,
                       top_db=top_db)
@cache(level=30)
def db_to_amplitude(S_db, ref=1.0):
    '''Invert `amplitude_to_db`: map a dB-scaled spectrogram back to amplitude.

    Computes `S ~= 10.0**(0.5 * (S_db + log10(ref)/10))`.

    Parameters
    ----------
    S_db : np.ndarray
        dB-scaled spectrogram

    ref : number > 0
        Optional reference power.

    Returns
    -------
    S : np.ndarray
        Linear magnitude spectrogram

    Notes
    -----
    This function caches at level 30.
    '''
    # Recover power first (with the reference squared to stay on the
    # amplitude scale), then take the square root
    power = db_to_power(S_db, ref=ref**2)
    return power**0.5
@cache(level=30)
def perceptual_weighting(S, frequencies, **kwargs):
    '''Perceptual weighting of a power spectrogram:

    `S_p[f] = A_weighting(f) + 10*log(S[f] / ref)`

    Parameters
    ----------
    S : np.ndarray [shape=(d, t)]
        Power spectrogram

    frequencies : np.ndarray [shape=(d,)]
        Center frequency for each row of `S`

    kwargs : additional keyword arguments
        Additional keyword arguments to `logamplitude`.

    Returns
    -------
    S_p : np.ndarray [shape=(d, t)]
        perceptually weighted version of `S`

    See Also
    --------
    logamplitude

    Notes
    -----
    This function caches at level 30.

    Examples
    --------
    Re-weight a CQT power spectrum, using peak power as reference

    >>> y, sr = librosa.load(librosa.util.example_audio_file())
    >>> CQT = librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('A1'))
    >>> freqs = librosa.cqt_frequencies(CQT.shape[0],
    ...                                 fmin=librosa.note_to_hz('A1'))
    >>> perceptual_CQT = librosa.perceptual_weighting(CQT**2,
    ...                                               freqs,
    ...                                               ref=np.max)
    '''
    # Per-bin A-weighting offsets, shaped as a column vector so they
    # broadcast across time frames
    a_weights = time_frequency.A_weighting(frequencies).reshape((-1, 1))

    # Add the offsets to the dB-scaled spectrogram
    log_S = logamplitude(S, **kwargs)
    return a_weights + log_S
@cache(level=30)
def fmt(y, t_min=0.5, n_fmt=None, kind='cubic', beta=0.5, over_sample=1, axis=-1):
    """The fast Mellin transform (FMT) [1]_ of a uniformly sampled signal y.

    When the Mellin parameter (beta) is 1/2, it is also known as the scale
    transform [2]_.  The scale transform can be useful for audio analysis
    because its magnitude is invariant to scaling of the domain (e.g., time
    stretching or compression).  This is analogous to the magnitude of the
    Fourier transform being invariant to shifts in the input domain.

    .. [1] De Sena, Antonio, and Davide Rocchesso.
        "A fast Mellin and scale transform."
        EURASIP Journal on Applied Signal Processing 2007.1 (2007): 75-75.

    .. [2] Cohen, L.
        "The scale representation."
        IEEE Transactions on Signal Processing 41, no. 12 (1993): 3275-3292.

    Parameters
    ----------
    y : np.ndarray, real-valued
        The input signal(s).  Can be multidimensional.
        The target axis must contain at least 3 samples.

    t_min : float > 0
        The minimum time spacing (in samples).
        This value should generally be less than 1 to preserve as much
        information as possible.

    n_fmt : int > 2 or None
        The number of scale transform bins to use.
        If None, then `n_bins = over_sample * ceil(n * log((n-1)/t_min))`
        is taken, where `n = y.shape[axis]`

    kind : str
        The type of interpolation to use when re-sampling the input.
        See `scipy.interpolate.interp1d` for possible values.

        Note that the default is to use high-precision (cubic)
        interpolation.  This can be slow in practice; if speed is
        preferred over accuracy, then consider using `kind='linear'`.

    beta : float
        The Mellin parameter.  `beta=0.5` provides the scale transform.

    over_sample : float >= 1
        Over-sampling factor for exponential resampling.

    axis : int
        The axis along which to transform `y`

    Returns
    -------
    x_scale : np.ndarray [dtype=complex]
        The scale transform of `y` along the `axis` dimension.

    Raises
    ------
    ParameterError
        if `n_fmt < 2` or `t_min <= 0`
        or if `y` is not finite
        or if `y.shape[axis] < 3`.

    Notes
    -----
    This function caches at level 30.
    """
    n = y.shape[axis]

    if n < 3:
        raise ParameterError('y.shape[{:}]=={:} < 3'.format(axis, n))

    if t_min <= 0:
        raise ParameterError('t_min must be a positive number')

    if n_fmt is None:
        if over_sample < 1:
            raise ParameterError('over_sample must be >= 1')

        # The base is the maximum ratio between adjacent samples
        # Since the sample spacing is increasing, this is simply the
        # ratio between the positions of the last two samples: (n-1)/(n-2)
        log_base = np.log(n - 1) - np.log(n - 2)

        n_fmt = int(np.ceil(over_sample * (np.log(n - 1) - np.log(t_min)) / log_base))

    elif n_fmt < 3:
        raise ParameterError('n_fmt=={:} < 3'.format(n_fmt))

    else:
        log_base = (np.log(n_fmt - 1) - np.log(n_fmt - 2)) / over_sample

    if not np.all(np.isfinite(y)):
        raise ParameterError('y must be finite everywhere')

    base = np.exp(log_base)

    # original grid: signal covers [0, 1).  This range is arbitrary, but
    # convenient.  The final sample is positioned at (n-1)/n, so we omit
    # the endpoint
    x = np.linspace(0, 1, num=n, endpoint=False)

    # build the interpolator
    f_interp = scipy.interpolate.interp1d(x, y, kind=kind, axis=axis)

    # build the new sampling grid
    # exponentially spaced between t_min/n and 1 (exclusive)
    # we'll go one past where we need, and drop the last sample
    # When over-sampling, the last input sample contributions n_over samples.
    # To keep the spacing consistent, we over-sample by n_over, and then
    # trim the final samples.
    n_over = int(np.ceil(over_sample))
    x_exp = np.logspace((np.log(t_min) - np.log(n)) / log_base,
                        0,
                        num=n_fmt + n_over,
                        endpoint=False,
                        base=base)[:-n_over]

    # Clean up any rounding errors at the boundaries of the interpolation
    # The interpolator gets angry if we try to extrapolate, so clipping is
    # necessary here.
    if x_exp[0] < t_min or x_exp[-1] > float(n - 1.0) / n:
        x_exp = np.clip(x_exp, float(t_min) / n, x[-1])

    # Make sure that all sample points are unique
    assert len(np.unique(x_exp)) == len(x_exp)

    # Resample the signal
    y_res = f_interp(x_exp)

    # Broadcast the window correctly
    shape = [1] * y_res.ndim
    shape[axis] = -1

    # Apply the window and fft
    result = fft.fft(y_res * (x_exp**beta).reshape(shape),
                     axis=axis, overwrite_x=True)

    # Slice out the positive-scale component along `axis`.
    # NOTE: index with a tuple -- indexing an ndarray with a *list* of
    # slices was deprecated in NumPy 1.15 and removed in NumPy 1.24.
    idx = [slice(None)] * result.ndim
    idx[axis] = slice(0, 1 + n_fmt//2)

    # Truncate and length-normalize
    return result[tuple(idx)] * np.sqrt(n) / n_fmt
def _spectrogram(y=None, S=None, n_fft=2048, hop_length=512, power=1):
    '''Helper function to retrieve a magnitude spectrogram.

    This is primarily used in feature extraction functions that can
    operate on either audio time-series or spectrogram input.

    Parameters
    ----------
    y : None or np.ndarray [ndim=1]
        If provided, an audio time series

    S : None or np.ndarray
        Spectrogram input, optional

    n_fft : int > 0
        STFT window size

    hop_length : int > 0
        STFT hop length

    power : float > 0
        Exponent for the magnitude spectrogram,
        e.g., 1 for energy, 2 for power, etc.

    Returns
    -------
    S_out : np.ndarray [dtype=np.float32]
        - If `S` is provided as input, then `S_out == S`
        - Else, `S_out = |stft(y, n_fft=n_fft, hop_length=hop_length)|**power`

    n_fft : int > 0
        - If `S` is provided, then `n_fft` is inferred from `S`
        - Else, copied from input
    '''
    if S is None:
        # No spectrogram given: compute one from the time series
        S = np.abs(stft(y, n_fft=n_fft, hop_length=hop_length))**power
    else:
        # Spectrogram given: infer the FFT size from its row count
        n_fft = 2 * (S.shape[0] - 1)

    return S, n_fft
|
isc
|
vdmitriyev/services-to-wordcloud
|
services/twitter/twitter_user_locations.py
|
1
|
4107
|
#!/usr/bin/env python
__author__ = "Viktor Dmitriyev"
__copyright__ = "Copyright 2015, Viktor Dmitriyev"
__credits__ = ["Viktor Dmitriyev"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "-"
__email__ = ""
__status__ = "dev"
__date__ = "24.02.2015"
__description__ = "Tiny python utility that downloads locations of your twitter followers."
import os
import pandas as pd
import twitter
from datetime import datetime
import oauth_info as auth # our local file with the OAuth infos
class TimelineMiner(object):
    """Collects the profile locations of a Twitter user's followers.

    Results are accumulated into a pandas DataFrame (``self.df``) with
    columns 'screen_name', 'id' and 'location'; only 'location' is
    currently populated (the other assignments are commented out in
    get_list_of_users).
    """

    def __init__(self, access_token, access_secret, consumer_key, consumer_secret, user_name):
        # OAuth credentials used to authenticate against the twitter API
        self.access_token = access_token
        self.access_secret = access_secret
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        # Screen name whose followers will be looked up
        self.user_name = user_name
        # twitter.Twitter API handle; populated by authenticate()
        self.auth = None
        # Accumulator for the fetched follower data
        self.df = pd.DataFrame(columns=['screen_name', 'id', 'location'], dtype='str')

    def authenticate(self):
        """Build the twitter API handle from the stored OAuth credentials.

        Returns True if the handle was constructed successfully.
        """
        self.auth = twitter.Twitter(auth=twitter.OAuth(self.access_token,
                                    self.access_secret, self.consumer_key,
                                    self.consumer_secret))
        return bool(isinstance(self.auth, twitter.api.Twitter))

    def get_list_of_users(self):
        """
        (obj) -> None

        Fetches the follower IDs of self.user_name and looks up each
        account (in batches of 100) to extract its profile location
        into self.df.
        """
        counter = 0
        lookup_ids = list()

        def _fire_lookup(counter, look_up):
            """
            (str, list) -> None

            Look up a batch of accounts by ID and record each follower's
            location in self.df at successive row indices.

            NOTE(review): `counter` is a local parameter here, so the
            increments below do not propagate to the caller; the caller
            compensates by advancing its own counter by the batch size.
            """
            follower_ids = self.auth.users.lookup(user_id=look_up)
            for follower in follower_ids:
                #self.df.loc[counter,'screen_name'] = follower['screen_name']
                #self.df.loc[counter,'id'] = follower['id']
                self.df.loc[counter,'location'] = follower['location']
                counter += 1

        follower_ids = self.auth.followers.ids(screen_name=self.user_name)
        for account_id in follower_ids['ids']:
            lookup_ids.append(account_id)
            # Flush a full batch of 100 IDs (the lookup batch size used here)
            if len(lookup_ids) == 100:
                _fire_lookup(counter, lookup_ids)
                lookup_ids = list()
                counter +=100
        # Flush any remaining partial batch
        if len(lookup_ids) > 0:
            _fire_lookup(counter, lookup_ids)

    def make_csv(self, path):
        """
        (obj, str) -> None

        Checks if the directory 'data' exists, creates it otherwise.
        Saves the data from twitter in csv using pandas.

        NOTE(review): the 'data' directory is created, but `path` is
        used as-is; the two are only related if the caller passes a
        path under data/.
        """
        if not os.path.exists('data'):
            os.makedirs('data')
        self.df.to_csv(path, encoding='utf8')
        print '[i] tweets saved into %s' % path

    def __get_date(self, timeline, tweet):
        # Parse twitter's 'created_at' timestamp and re-format it.
        # NOTE(review): the output format "%Y-%d-%m" swaps day and month
        # relative to ISO order -- confirm this is intentional before reuse.
        timest = datetime.strptime(timeline[tweet]['created_at'],
                                   "%a %b %d %H:%M:%S +0000 %Y")
        date = timest.strftime("%Y-%d-%m %H:%M:%S")
        return date
if __name__ == "__main__":
    import argparse
    # Command-line interface: only the output CSV path is required.
    parser = argparse.ArgumentParser(
            description='A command line tool to download your personal twitter user locations.',
            formatter_class=argparse.RawTextHelpFormatter,
            epilog='\nExample:\n'\
                    './twitter_user_locations.py -o my_timeline.csv -k Python,Github')
    parser.add_argument('-o', '--out', help='Filename for creating the output CSV file.')
    parser.add_argument('-v', '--version', action='version', version='v. 1.0.0')
    args = parser.parse_args()
    if not args.out:
        print('Please provide a filename for creating the output CSV file.')
        quit()
    # Credentials come from the local oauth_info module imported as `auth`.
    tm = TimelineMiner(auth.ACCESS_TOKEN,
                       auth.ACCESS_TOKEN_SECRET,
                       auth.CONSUMER_KEY,
                       auth.CONSUMER_SECRET,
                       auth.USER_NAME)
    print('Authentification successful: %s' %tm.authenticate())
    # Download follower locations, then persist them to the requested path.
    tm.get_list_of_users()
    tm.make_csv(args.out)
|
mit
|
mramire8/active
|
experiment/unc_fixk.py
|
1
|
11089
|
__author__ = 'maru'
__copyright__ = "Copyright 2013, ML Lab"
__version__ = "0.1"
__status__ = "Development"
import sys
import os
sys.path.append(os.path.abspath("."))
from experiment_utils import *
import argparse
import numpy as np
from sklearn.datasets.base import Bunch
from datautil.load_data import *
from sklearn import linear_model
import time
from sklearn import metrics
from collections import defaultdict
from datautil.textutils import StemTokenizer
from strategy import randomsampling
from expert import baseexpert
from sklearn.feature_extraction.text import CountVectorizer
import pickle
############# COMMAND LINE PARAMETERS ##################
# Experiment configuration for the fixed-k uncertainty-sampling run.
ap = argparse.ArgumentParser(description=__doc__,
                             formatter_class=argparse.RawTextHelpFormatter)
ap.add_argument('--train',
                metavar='TRAIN',
                default="20news",
                help='training data (libSVM format)')
ap.add_argument('--neutral-threshold',
                metavar='NEUTRAL',
                type=float,
                default=.4,
                help='neutrality threshold of uncertainty')
ap.add_argument('--expert-penalty',
                metavar='EXPERT_PENALTY',
                type=float,
                default=0.3,
                help='Expert penalty value for the classifier simulation')
ap.add_argument('--trials',
                metavar='TRIALS',
                type=int,
                default=5,
                help='number of trials')
ap.add_argument('--folds',
                metavar='FOLDS',
                type=int,
                default=1,
                help='number of folds')
ap.add_argument('--budget',
                metavar='BUDGET',
                type=int,
                default=20000,
                help='budget')
ap.add_argument('--step-size',
                metavar='STEP_SIZE',
                type=int,
                default=10,
                help='instances to acquire at every iteration')
ap.add_argument('--bootstrap',
                metavar='BOOTSTRAP',
                type=int,
                default=50,
                help='size of the initial labeled dataset')
ap.add_argument('--cost-function',
                metavar='COST_FUNCTION',
                type=str,
                default="direct",
                help='cost function of the x-axis [uniform|log|linear|direct]')
# Cost model: list of [document-length, annotation-cost] control points.
ap.add_argument('--cost-model',
                metavar='COST_MODEL',
                type=str,
                default="[[10.0,5.7], [25.0,8.2], [50.1,10.9], [75,15.9], [100,16.7], [125,17.8], [150,22.7], [175,19.9], [200,17.4]]",
                help='cost function parameters of the cost function')
ap.add_argument('--fixk',
                metavar='FIXK',
                type=int,
                default=10,
                help='fixed k number of words')
ap.add_argument('--maxiter',
                metavar='MAXITER',
                type=int,
                default=5,
                help='Max number of iterations')
# NOTE(review): the help text below looks copy-pasted from --maxiter;
# this argument is actually the RNG seed.
ap.add_argument('--seed',
                metavar='SEED',
                type=int,
                default=8765432,
                help='Max number of iterations')
ap.add_argument('--method',
                metavar='METHOD',
                type=str,
                default="unc",
                help='Sampling method [rnd|unc]')
ap.add_argument('--classifier',
                metavar='CLASSIFIER',
                type=str,
                default="lr",
                help='underlying classifier')
args = ap.parse_args()
# Dedicated RandomState so the experiment is reproducible from --seed.
rand = np.random.mtrand.RandomState(args.seed)
print args
print
####################### MAIN ####################
def main():
    """Run the fixed-k uncertainty-sampling active-learning experiment.

    Reads configuration from the module-level ``args`` namespace, trains a
    student classifier with a simulated (neutrality-aware) expert labeler,
    and prints cost-extrapolated accuracy/AUC curves over ``args.trials``
    independent trials.
    """
    # Results keyed by cumulative labeling cost (the x axis).
    accuracies = defaultdict(lambda: [])
    aucs = defaultdict(lambda: [])
    x_axis = defaultdict(lambda: [])
    vct = CountVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=True, ngram_range=(1, 3),
                          token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer())
    vct_analizer = vct.build_tokenizer()
    print("Start loading ...")
    # data fields: data, bow, file_names, target_names, target
    ########## NEWS GROUPS ###############
    # easy to hard. see "Less is More" paper: http://axon.cs.byu.edu/~martinez/classes/678/Presentations/Clawson.pdf
    categories = [['alt.atheism', 'talk.religion.misc'],
                  ['comp.graphics', 'comp.windows.x'],
                  ['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware'],
                  ['rec.sport.baseball', 'sci.crypt']]
    min_size = max(100, args.fixk)
    # fixk < 0 means "use full documents" (no truncation).
    if args.fixk < 0:
        args.fixk = None
    data, vct = load_from_file(args.train, categories, args.fixk, min_size, vct)
    # data = load_dataset(args.train, args.fixk, categories[0], vct, min_size)
    print("Data %s" % args.train)
    print("Data size %s" % len(data.train.data))
    parameters = parse_parameters_mat(args.cost_model)
    print "Cost Parameters %s" % parameters
    cost_model = set_cost_model(args.cost_function, parameters=parameters)
    print "\nCost Model: %s" % cost_model.__class__.__name__
    #### STUDENT CLASSIFIER
    clf = set_classifier(args.classifier)
    print "\nClassifier: %s" % clf
    #### EXPERT CLASSIFIER
    # The expert is simulated by an L1 logistic regression fit on the TEST
    # split; it may return None (neutral) below the neutrality threshold.
    exp_clf = linear_model.LogisticRegression(penalty='l1', C=args.expert_penalty)
    exp_clf.fit(data.test.bow, data.test.target)
    expert = baseexpert.NeutralityExpert(exp_clf, threshold=args.neutral_threshold,
                                         cost_function=cost_model.cost_function)
    print "\nExpert: %s " % expert
    #### ACTIVE LEARNING SETTINGS
    step_size = args.step_size
    bootstrap_size = args.bootstrap
    evaluation_points = 200
    print("\nExperiment: step={0}, BT={1}, plot points={2}, fixk:{3}, minsize:{4}".format(step_size, bootstrap_size,
                                                                                         evaluation_points, args.fixk,
                                                                                         min_size))
    print ("Cheating experiment - use full uncertainty query k words")
    t0 = time.time()
    ### experiment starts
    tx =[]
    tac = []
    tau = []
    for t in range(args.trials):
        trial_accu =[]
        trial_aucs = []
        trial_x_axis = []
        print "*" * 60
        print "Trial: %s" % t
        student = randomsampling.UncertaintyLearner(model=clf, accuracy_model=None, budget=args.budget, seed=t,
                                                    subpool=250)
        print "\nStudent: %s " % student
        train_indices = []
        train_x = []
        train_y = []
        pool = Bunch()
        pool.data = data.train.bow.tocsr()  # full words, for training
        pool.fixk = data.train.bowk.tocsr()  # k words BOW for querying
        pool.target = data.train.target
        pool.predicted = []
        pool.kwords = np.array(data.train.kwords)  # k words
        pool.remaining = set(range(pool.data.shape[0]))  # indices of the pool
        bootstrapped = False
        current_cost = 0
        iteration = 0
        # Query loop: stops on exhausted budget, empty pool, or maxiter.
        while 0 < student.budget and len(pool.remaining) > step_size and iteration <= args.maxiter:
            if not bootstrapped:
                ## random from each bootstrap
                bt = randomsampling.BootstrapFromEach(t * 10)
                query_index = bt.bootstrap(pool=pool, k=bootstrap_size)
                bootstrapped = True
                print "Bootstrap: %s " % bt.__class__.__name__
                print
            else:
                query_index = student.pick_next(pool=pool, k=step_size)
            query = pool.fixk[query_index]  # query with k words
            query_size = [len(vct_analizer(x)) for x in pool.kwords[query_index]]
            ground_truth = pool.target[query_index]
            #labels, spent = expert.label(unlabeled=query, target=ground_truth)
            if iteration == 0: ## bootstrap uses ground truth
                labels = ground_truth
                spent = [0] * len(ground_truth) ## bootstrap cost is ignored
            else:
                # Expert may answer None (neutral) per instance; cost is
                # estimated from the k-word query length.
                labels = expert.label_instances(query, ground_truth)
                spent = expert.estimate_instances(query_size)
            ### accumulate the cost of the query
            query_cost = np.array(spent).sum()
            current_cost += query_cost
            ## add data recent acquired to train
            # Drop neutral (None) answers before extending the training set.
            useful_answers = np.array([[x, y] for x, y in zip(query_index, labels) if y is not None])
            # train_indices.extend(query_index)
            if useful_answers.shape[0] != 0:
                train_indices.extend(useful_answers[:, 0])
                # add labels to training
                train_x = pool.data[train_indices]  ## train with all the words
                # update labels with the expert labels
                #train_y = pool.target[train_indices]
                train_y.extend(useful_answers[:, 1])
            if train_x.shape[0] != len(train_y):
                raise Exception("Training data corrupted!")
            # remove labels from pool
            pool.remaining.difference_update(query_index)
            # retrain the model
            current_model = student.train(train_x, train_y)
            # evaluate and save results
            y_probas = current_model.predict_proba(data.test.bow)
            auc = metrics.roc_auc_score(data.test.target, y_probas[:, 1])
            pred_y = current_model.classes_[np.argmax(y_probas, axis=1)]
            accu = metrics.accuracy_score(data.test.target, pred_y)
            print ("TS:{0}\tAccu:{1:.3f}\tAUC:{2:.3f}\tCost:{3:.2f}\tCumm:{4:.2f}\tSpent:{5}\tuseful:{6}".format(len(train_indices),
                                                                                                                 accu,
                                                                                                                 auc, query_cost,
                                                                                                                 current_cost, format_spent(spent), useful_answers.shape[0]))
            ## the results should be based on the cost of the labeling
            if iteration > 0: # bootstrap iteration
                student.budget -= query_cost ## Bootstrap doesn't count
            x_axis_range = current_cost
            x_axis[x_axis_range].append(current_cost)
            ## save results
            accuracies[x_axis_range].append(accu)
            aucs[x_axis_range].append(auc)
            trial_accu.append([x_axis_range, accu])
            trial_aucs.append([x_axis_range, auc])
            iteration += 1
        # end of budget loop
        tac.append(trial_accu)
        tau.append(trial_aucs)
    #end trial loop
    # Different trials hit different cost points; extrapolate onto a
    # common grid before reporting.
    accuracies = extrapolate_trials(tac, cost_25=parameters[1][1], step_size=args.step_size)
    aucs = extrapolate_trials(tau, cost_25=parameters[1][1], step_size=args.step_size)
    print("Elapsed time %.3f" % (time.time() - t0))
    print_extrapolated_results(accuracies, aucs)
# Script entry point: run the active-learning experiment.
if __name__ == '__main__':
    main()
|
apache-2.0
|
janpipek/boadata
|
boadata/data/dw_types.py
|
1
|
1340
|
import re
import datadotworld as dw
from boadata.core import DataObject
from boadata.core.data_conversion import ChainConversion
from .pandas_types import PandasDataFrameBase
@ChainConversion.enable_from(
    "csv", through="pandas_data_frame", pass_kwargs=["user", "dataset", "table"]
)
@DataObject.register_type()
class DataDotWorldTable(PandasDataFrameBase):
    """A table hosted on data.world, addressed as ``dw://user/dataset/table``."""

    type_name = "dw_table"

    # URI scheme: dw://<user>/<dataset>/<table> (word chars and dashes).
    URI_RE = re.compile(r"dw://(\w|\-)+/(\w|\-)+/(\w|\-)+$")

    @classmethod
    def accepts_uri(cls, uri: str) -> bool:
        """Return True when ``uri`` matches the dw:// addressing scheme."""
        return re.match(DataDotWorldTable.URI_RE, uri) is not None

    @classmethod
    def from_uri(cls, uri: str, **kwargs) -> "DataDotWorldTable":
        """Download the table referenced by ``uri`` via the datadotworld client."""
        # Everything between the scheme and the last segment is the dataset id.
        dataset_name = "/".join(uri.split("/")[2:-1])
        dataset = dw.load_dataset(dataset_name)
        df = dataset.dataframes[uri.split("/")[-1]]
        return cls(inner_data=df, uri=uri, **kwargs)

    @classmethod
    def __from_pandas_data_frame__(cls, df: PandasDataFrameBase, user: str, dataset: str, table: str) -> "DataDotWorldTable":
        """Upload ``df`` as ``<table>.csv`` into ``<user>/<dataset>`` and
        return the resulting remote table object."""
        with dw.open_remote_file(
            "{0}/{1}".format(user, dataset), "{0}.csv".format(table)
        ) as w:
            # Fixed: removed a leftover debug ``print(df.inner_data)`` that
            # dumped the entire frame to stdout on every upload.
            df.inner_data.to_csv(w, index=False)
        uri = "dw://{0}/{1}/{2}".format(user, dataset, table)
        return DataDotWorldTable.from_uri(uri, source=df)
|
mit
|
ifuding/Kaggle
|
SVPC/Code/philly/HimanChau.py
|
2
|
9735
|
#Initially forked from Bojan's kernel here: https://www.kaggle.com/tunguz/bow-meta-text-and-dense-features-lb-0-2242/code
#That kernel was forked from Nick Brook's kernel here: https://www.kaggle.com/nicapotato/bow-meta-text-and-dense-features-lgbm?scriptVersionId=3493400
#Used oof method from Faron's kernel here: https://www.kaggle.com/mmueller/stacking-starter?scriptVersionId=390867
#Used some text cleaning method from Muhammad Alfiansyah's kernel here: https://www.kaggle.com/muhammadalfiansyah/push-the-lgbm-v19
import time
notebookstart= time.time()
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import gc
print("Data:\n",os.listdir("../input"))
# Models Packages
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from sklearn import feature_selection
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
# Gradient Boosting
import lightgbm as lgb
from sklearn.linear_model import Ridge
from sklearn.cross_validation import KFold
# Tf-Idf
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.pipeline import FeatureUnion
from scipy.sparse import hstack, csr_matrix
from nltk.corpus import stopwords
# Viz
import seaborn as sns
import matplotlib.pyplot as plt
import re
import string
# Number of folds for the out-of-fold ridge stacking stage.
NFOLDS = 5
# Global RNG seed shared by KFold and the ridge model.
SEED = 42
class SklearnWrapper(object):
    """Thin adapter that gives any sklearn estimator a train/predict API."""

    def __init__(self, clf, seed=0, params=None, seed_bool = True):
        # Keeps the original contract: `params` must be a dict when seeding
        # is requested, and it is mutated in place with the random_state.
        effective_params = params
        if seed_bool == True:
            effective_params['random_state'] = seed
        self.clf = clf(**effective_params)

    def train(self, x_train, y_train):
        """Fit the wrapped estimator on the given features and targets."""
        self.clf.fit(x_train, y_train)

    def predict(self, x):
        """Return the wrapped estimator's predictions for ``x``."""
        return self.clf.predict(x)
def get_oof(clf, x_train, y, x_test):
    """Out-of-fold stacking predictions (Faron's helper).

    Relies on the module globals ``ntrain``, ``ntest``, ``kf`` and
    ``NFOLDS``.  Returns ``(train_oof, test_oof)`` as column vectors; the
    test predictions are averaged over all folds.
    """
    oof_train = np.zeros((ntrain,))
    oof_test = np.zeros((ntest,))
    fold_test_preds = np.empty((NFOLDS, ntest))

    for fold_idx, (fit_idx, hold_idx) in enumerate(kf):
        print('\nFold {}'.format(fold_idx))
        clf.train(x_train[fit_idx], y[fit_idx])
        # Held-out rows get predictions from a model that never saw them.
        oof_train[hold_idx] = clf.predict(x_train[hold_idx])
        fold_test_preds[fold_idx, :] = clf.predict(x_test)

    oof_test[:] = fold_test_preds.mean(axis=0)
    return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
def cleanName(text):
    """Normalize a free-text name field for modeling.

    Lower-cases the text, splits digit runs into separate tokens, strips
    every non-alphabetic character, and collapses whitespace.  Returns the
    literal string "name error" when ``text`` is not string-like (e.g. NaN
    floats coming from pandas).
    """
    try:
        textProc = text.lower()
        # Separate digit runs from letters so "abc123" -> "abc 123".
        textProc = " ".join(map(str.strip, re.split('(\d+)', textProc)))
        # Fixed: the original pattern u'[^[:alpha:]]' used a POSIX class,
        # which Python's `re` does not support -- it parsed as the negated
        # literal set {'[', ':', 'a', 'l', 'p', 'h'} and stripped almost
        # every character.  After lower(), [^a-z] keeps exactly the
        # alphabetic characters, which is the documented intent.
        regex = re.compile(u'[^a-z]')
        textProc = regex.sub(" ", textProc)
        textProc = " ".join(textProc.split())
        return textProc
    except Exception:
        # Narrowed from a bare except; still best-effort for bad inputs.
        return "name error"
def rmse(y, y0):
    """Root mean squared error between two equal-length arrays."""
    assert len(y) == len(y0)
    squared_residuals = np.power(y - y0, 2)
    return np.sqrt(squared_residuals.mean())
print("\nData Load Stage")
# item_id becomes the index; activation_date is parsed for time features.
training = pd.read_csv('../input/train.csv', index_col = "item_id", parse_dates = ["activation_date"])
traindex = training.index
testing = pd.read_csv('../input/test.csv', index_col = "item_id", parse_dates = ["activation_date"])
testdex = testing.index
ntrain = training.shape[0]
ntest = testing.shape[0]
kf = KFold(ntrain, n_folds=NFOLDS, shuffle=True, random_state=SEED)
y = training.deal_probability.copy()
training.drop("deal_probability",axis=1, inplace=True)
print('Train shape: {} Rows, {} Columns'.format(*training.shape))
print('Test shape: {} Rows, {} Columns'.format(*testing.shape))
print("Combine Train and Test")
# Concatenate so that encoders/vectorizers see the full vocabulary.
df = pd.concat([training,testing],axis=0)
del training, testing
gc.collect()
print('\nAll Data shape: {} Rows, {} Columns'.format(*df.shape))
print("Feature Engineering")
# +0.001 avoids log(0); missing prices become a -999 sentinel.
df["price"] = np.log(df["price"]+0.001)
df["price"].fillna(-999,inplace=True)
df["image_top_1"].fillna(-999,inplace=True)
print("\nCreate Time Variables")
df["Weekday"] = df['activation_date'].dt.weekday
# NOTE(review): "Weekd of Year" looks like a typo for "Week of Year", but
# it is a runtime column name consumed downstream, so it is kept as-is.
df["Weekd of Year"] = df['activation_date'].dt.week
df["Day of Month"] = df['activation_date'].dt.day
# Create Validation Index and Remove Dead Variables
training_index = df.loc[df.activation_date<=pd.to_datetime('2017-04-07')].index
validation_index = df.loc[df.activation_date>=pd.to_datetime('2017-04-08')].index
df.drop(["activation_date","image"],axis=1,inplace=True)
print("\nEncode Variables")
categorical = ["user_id","region","city","parent_category_name","category_name","user_type","image_top_1","param_1","param_2","param_3"]
print("Encoding :",categorical)
# Encoder:
lbl = preprocessing.LabelEncoder()
for col in categorical:
    # NOTE(review): this fillna has no effect -- the result is discarded
    # (no inplace/assignment).  In practice astype(str) below turns NaN
    # into the string 'nan', which is what actually gets label-encoded.
    df[col].fillna('Unknown')
    df[col] = lbl.fit_transform(df[col].astype(str))
print("\nText Features")
# Feature Engineering
# Meta Text Features
textfeats = ["description", "title"]
#df['title'] = df['title'].apply(lambda x: cleanName(x))
#df["description"] = df["description"].apply(lambda x: cleanName(x))
for cols in textfeats:
    df[cols] = df[cols].astype(str)
    df[cols] = df[cols].astype(str).fillna('missing') # FILL NA
    df[cols] = df[cols].str.lower() # Lowercase all text, so that capitalized words dont get treated differently
    df[cols + '_num_words'] = df[cols].apply(lambda comment: len(comment.split())) # Count number of Words
    df[cols + '_num_unique_words'] = df[cols].apply(lambda comment: len(set(w for w in comment.split())))
    df[cols + '_words_vs_unique'] = df[cols+'_num_unique_words'] / df[cols+'_num_words'] * 100 # Count Unique Words
print("\n[TF-IDF] Term Frequency Inverse Document Frequency Stage")
russian_stop = set(stopwords.words('russian'))
# Shared keyword arguments for the description TF-IDF vectorizer below.
tfidf_para = {
    "stop_words": russian_stop,
    "analyzer": 'word',
    "token_pattern": r'\w{1,}',
    "sublinear_tf": True,
    "dtype": np.float32,
    "norm": 'l2',
    #"min_df":5,
    #"max_df":.9,
    "smooth_idf":False
}
def get_col(col_name):
    """Return an extractor pulling ``col_name`` from a record mapping.

    Used as the ``preprocessor`` of the vectorizers below, which are fed
    ``df.to_dict('records')`` rows.
    """
    def _extract(record):
        return record[col_name]
    return _extract
##I added to the max_features of the description. It did not change my score much but it may be worth investigating
# TF-IDF for description, raw counts for title; concatenated side by side.
vectorizer = FeatureUnion([
    ('description',TfidfVectorizer(
        ngram_range=(1, 2),
        max_features=17000,
        **tfidf_para,
        preprocessor=get_col('description'))),
    ('title',CountVectorizer(
        ngram_range=(1, 2),
        stop_words = russian_stop,
        #max_features=7000,
        preprocessor=get_col('title')))
])
start_vect=time.time()
#Fit my vectorizer on the entire dataset instead of the training rows
#Score improved by .0001
vectorizer.fit(df.to_dict('records'))
ready_df = vectorizer.transform(df.to_dict('records'))
tfvocab = vectorizer.get_feature_names()
print("Vectorization Runtime: %0.2f Minutes"%((time.time() - start_vect)/60))
# Drop Text Cols
# The raw text is no longer needed once the sparse matrices exist.
textfeats = ["description", "title"]
df.drop(textfeats, axis=1,inplace=True)
from sklearn.metrics import mean_squared_error
from math import sqrt
ridge_params = {'alpha':20.0, 'fit_intercept':True, 'normalize':False, 'copy_X':True,
                'max_iter':None, 'tol':0.001, 'solver':'auto', 'random_state':SEED}
#Ridge oof method from Faron's kernel
#I was using this to analyze my vectorization, but figured it would be interesting to add the results back into the dataset
#It doesn't really add much to the score, but it does help lightgbm converge faster
ridge = SklearnWrapper(clf=Ridge, seed = SEED, params = ridge_params)
# Out-of-fold ridge predictions on the text features become a meta-feature.
ridge_oof_train, ridge_oof_test = get_oof(ridge, ready_df[:ntrain], y, ready_df[ntrain:])
rms = sqrt(mean_squared_error(y, ridge_oof_train))
print('Ridge OOF RMSE: {}'.format(rms))
print("Modeling Stage")
ridge_preds = np.concatenate([ridge_oof_train, ridge_oof_test])
df['ridge_preds'] = ridge_preds
# Combine Dense Features with Sparse Text Bag of Words Features
X = hstack([csr_matrix(df.loc[traindex,:].values),ready_df[0:traindex.shape[0]]]) # Sparse Matrix
testing = hstack([csr_matrix(df.loc[testdex,:].values),ready_df[traindex.shape[0]:]])
tfvocab = df.columns.tolist() + tfvocab
for shape in [X,testing]:
    print("{} Rows and {} Cols".format(*shape.shape))
print("Feature Names Length: ",len(tfvocab))
# Free the dense frame before the (memory-hungry) training stage.
del df
gc.collect();
print("\nModeling Stage")
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.10, random_state=23)
del ridge_preds,vectorizer,ready_df
gc.collect();
print("Light Gradient Boosting Regressor")
lgbm_params = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'regression',
    'metric': 'rmse',
    # 'max_depth': 15,
    'num_leaves': 250,
    'feature_fraction': 0.65,
    'bagging_fraction': 0.85,
    # 'bagging_freq': 5,
    'learning_rate': 0.02,
    'verbose': 0
}
lgtrain = lgb.Dataset(X_train, y_train,
                      feature_name=tfvocab,
                      categorical_feature = categorical)
lgvalid = lgb.Dataset(X_valid, y_valid,
                      feature_name=tfvocab,
                      categorical_feature = categorical)
modelstart = time.time()
# Large round budget; early stopping on the validation split decides the
# actual number of trees.
lgb_clf = lgb.train(
    lgbm_params,
    lgtrain,
    num_boost_round=16000,
    valid_sets=[lgtrain, lgvalid],
    valid_names=['train','valid'],
    early_stopping_rounds=30,
    verbose_eval=200
)
# Feature Importance Plot
f, ax = plt.subplots(figsize=[7,10])
lgb.plot_importance(lgb_clf, max_num_features=50, ax=ax)
plt.title("Light GBM Feature Importance")
plt.savefig('feature_import.png')
print("Model Evaluation Stage")
lgpred = lgb_clf.predict(testing)
#Mixing lightgbm with ridge. I haven't really tested if this improves the score or not
#blend = 0.95*lgpred + 0.05*ridge_oof_test[:,0]
lgsub = pd.DataFrame(lgpred,columns=["deal_probability"],index=testdex)
lgsub['deal_probability'].clip(0.0, 1.0, inplace=True) # Between 0 and 1
lgsub.to_csv("lgsub.csv",index=True,header=True)
print("Model Runtime: %0.2f Minutes"%((time.time() - modelstart)/60))
print("Notebook Runtime: %0.2f Minutes"%((time.time() - notebookstart)/60))
|
apache-2.0
|
dh4gan/oberon
|
plot/EBM_movie.py
|
1
|
2378
|
# Written 17/1/14 by dh4gan
# Code reads in a sample of snapshots from EBM and plots them
import matplotlib.pyplot as plt
import numpy as np
from os import system
# File order
# 0 x
# 1 latitude
# 2 T
# 3 C
# 4 Q
# 5 IR
# 6 albedo
# 7 insolation
# 8 tau
# 9 ice fraction
# 10 habitability index
# Set up tuples and dictionaries
# Column layout of an EBM snapshot file; keys, display names and column
# indices are kept in parallel tuples and zipped into dicts below.
variablekeys = ("x","lat", "T", "C", "Q","IR","Albedo", "S",
                "tau", "ice","hab")
variablenames = ("x", r"$\lambda$", "T (K)", "C (/)", r"Net Heating ($erg\,s^{-1}\, cm^{-2}$)",
                 r"IR Cooling ($erg\,s^{-1}\, cm^{-2}$)",r"Albedo", r"Mean Insolation ($erg\,s^{-1}\, cm^{-2}$)",
                 "Optical Depth", r"$f_{ice}$","Habitability Index")
variablecolumns = (0,1,2,3,4,5,6,7,8,9,10)
nvar = len(variablekeys)
namedict = {}
coldict = {}
for i in range(len(variablekeys)):
    namedict[variablekeys[i]] = variablenames[i]
    coldict[variablekeys[i]] = variablecolumns[i]
# Open log file and read contents
# Python 2 script: raw_input returns str; input() evaluates the reply,
# which is how the dump numbers end up as ints.
prefix = raw_input("What is the name of the World? ")
initialdump = input("Which dump to start from? ")
finaldump = input("Which is the final dump?")
nfiles = finaldump-initialdump +1
# Define x axis as latitude (always)
xkey = 'lat'
ix = coldict[xkey]
# Pick variable to time average
print "Which variable is to be plotted?"
for i in range(len(variablekeys)):
    if(i!=ix): print variablekeys[i],":\t \t", namedict[variablekeys[i]]
keyword = raw_input("Enter appropriate keyword: ")
ykey = keyword
iy = coldict[keyword]
alldata = []
print "Plotting"
for i in range(nfiles):
idump = initialdump + i
print idump
# Read in data
inputfile = prefix+'.'+str(idump)
outputfile = inputfile+'.png'
data = np.genfromtxt(inputfile,skiprows=1)
if(i==0):
xdata = data[:,ix]
data = data[:,iy]
# Add to totals
fig1 = plt.figure()
ax = fig1.add_subplot(111)
ax.set_xlabel(namedict[xkey], fontsize = 16)
ax.set_ylabel(namedict[ykey], fontsize = 16)
ax.plot(xdata,data)
fig1.savefig(outputfile, format='ps')
# Make movie
# Command for converting images into gifs - machine dependent
#convertcommand = '/opt/ImageMagick/bin/convert '
convertcommand = '/usr/bin/convert '
print "Making movie"
system(convertcommand +prefix+'*.png movie.gif')
system('rm '+prefix+'*.png')
|
gpl-3.0
|
jingr1/SelfDrivingCar
|
overloadingoprator.py
|
1
|
1463
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-10-29 18:19:46
# @Author : jingray ([email protected])
# @Link : http://www.jianshu.com/u/01fb0364467d
# @Version : $Id$
import matplotlib.pyplot as plt
'''
The color class creates a color from 3 values, r, g, and b (red, green, and blue).
attributes:
r - a value between 0-255 for red
g - a value between 0-255 for green
b - a value between 0-255 for blue
'''
class Color(object):
    """A simple RGB color with components in the 0-255 range.

    attributes:
        r - a value between 0-255 for red
        g - a value between 0-255 for green
        b - a value between 0-255 for blue
    """
    # Initializes a color with rgb values
    def __init__(self, r, g, b):
        self.r = r
        self.g = g
        self.b = b

    # Called when a Color object is printed out
    def __repr__(self):
        '''Display a color swatch and returns a text description of r,g,b values'''
        plt.imshow([[(self.r/255, self.g/255, self.b/255)]])
        return 'r, g, b = ' + str(self.r) + ', ' + str(self.g) + ', ' + str(self.b)

    def __add__(self, other):
        '''Return a NEW Color whose r, g and b components are the averages
        of the two operands' components.

        Fixed: the previous implementation mutated ``self`` and returned
        it, so ``color1 + color2`` silently changed ``color1``.'''
        return Color((self.r + other.r) / 2,
                     (self.g + other.g) / 2,
                     (self.b + other.b) / 2)
# Demo: construct two colors and average them via the overloaded ``+``.
# Fixed: the original referenced ``color.Color`` (a NameError -- no
# ``color`` module is imported); the class is defined in this module
# as ``Color``.
color1 = Color(250, 0, 0)
print(color1)
color2 = Color(0, 50, 200)
print(color2)
new_color = color1 + color2
print(new_color)
|
mit
|
AnasGhrab/scikit-learn
|
sklearn/utils/multiclass.py
|
83
|
12343
|
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
    # The "labels" of an indicator matrix are simply its column indices.
    n_labels = check_array(y, ['csr', 'csc', 'coo']).shape[1]
    return np.arange(n_labels)
# Dispatch table: target-type string -> function extracting unique labels.
_FN_UNIQUE_LABELS = {
    'binary': _unique_multiclass,
    'multiclass': _unique_multiclass,
    'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
    """Extract an ordered array of unique labels

    We don't allow:
        - mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else,
          because there are no explicit labels)
        - mix of label indicator matrices of different sizes
        - mix of string and integer labels

    At the moment, we also don't allow "multiclass-multioutput" input type.

    Parameters
    ----------
    *ys : array-likes,

    Returns
    -------
    out : numpy array of shape [n_unique_labels]
        An ordered array of unique labels.

    Examples
    --------
    >>> from sklearn.utils.multiclass import unique_labels
    >>> unique_labels([3, 5, 5, 5, 7, 7])
    array([3, 5, 7])
    >>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
    array([1, 2, 3, 4])
    >>> unique_labels([1, 2, 10], [5, 11])
    array([ 1,  2,  5, 10, 11])
    """
    if not ys:
        raise ValueError('No argument has been passed.')
    # Check that we don't mix label format
    ys_types = set(type_of_target(x) for x in ys)
    if ys_types == set(["binary", "multiclass"]):
        # Binary is a special case of multiclass; unify before comparing.
        ys_types = set(["multiclass"])
    if len(ys_types) > 1:
        raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
    label_type = ys_types.pop()
    # Check consistency for the indicator format
    # (all indicator matrices must agree on the number of columns).
    if (label_type == "multilabel-indicator" and
            len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
                    for y in ys)) > 1):
        raise ValueError("Multi-label binary indicator input with "
                         "different numbers of labels")
    # Get the unique set of labels
    _unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
    if not _unique_labels:
        raise ValueError("Unknown label type: %s" % repr(ys))
    ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
    # Check that we don't mix string type with number type
    if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
        raise ValueError("Mix of label input types (string and number)")
    return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
    """ Check if ``y`` is in a multilabel format.

    A multilabel target is a 2d array with more than one column whose
    entries are binary, i.e. at most two distinct integral values.

    Parameters
    ----------
    y : numpy array of shape [n_samples]
        Target values.

    Returns
    -------
    out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ```False``.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.multiclass import is_multilabel
    >>> is_multilabel([0, 1, 0, 1])
    False
    >>> is_multilabel(np.array([[1, 0], [0, 0]]))
    True
    >>> is_multilabel(np.array([[1], [0], [0]]))
    False
    >>> is_multilabel(np.array([[1, 0, 0]]))
    True
    """
    if hasattr(y, '__array__'):
        y = np.asarray(y)
    # Anything that is not a 2d, multi-column array cannot be multilabel.
    if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
        return False
    if issparse(y):
        # dok/lil have no .data in the form used below; go through CSR.
        if isinstance(y, (dok_matrix, lil_matrix)):
            y = y.tocsr()
        if len(y.data) == 0:
            # All-zero matrix: trivially binary.
            return True
        if np.ptp(y.data) != 0:
            # More than one distinct stored value plus the implicit zero.
            return False
        return (y.dtype.kind in 'biu' or  # bool, int, uint
                _is_integral_float(np.unique(y.data)))
    labels = np.unique(y)
    if len(labels) >= 3:
        return False
    return (y.dtype.kind in 'biu' or  # bool, int, uint
            _is_integral_float(labels))
def type_of_target(y):
    """Determine the type of data indicated by target `y`

    Parameters
    ----------
    y : array-like

    Returns
    -------
    target_type : string
        One of:
        * 'continuous': `y` is an array-like of floats that are not all
          integers, and is 1d or a column vector.
        * 'continuous-multioutput': `y` is a 2d array of floats that are
          not all integers, and both dimensions are of size > 1.
        * 'binary': `y` contains <= 2 discrete values and is 1d or a column
          vector.
        * 'multiclass': `y` contains more than two discrete values, is not a
          sequence of sequences, and is 1d or a column vector.
        * 'multiclass-multioutput': `y` is a 2d array that contains more
          than two discrete values, is not a sequence of sequences, and both
          dimensions are of size > 1.
        * 'multilabel-indicator': `y` is a label indicator matrix, an array
          of two dimensions with at least two columns, and at most 2 unique
          values.
        * 'unknown': `y` is array-like but none of the above, such as a 3d
          array, sequence of sequences, or an array of non-sequence objects.

    Examples
    --------
    >>> import numpy as np
    >>> type_of_target([0.1, 0.6])
    'continuous'
    >>> type_of_target([1, -1, -1, 1])
    'binary'
    >>> type_of_target(['a', 'b', 'a'])
    'binary'
    >>> type_of_target([1.0, 2.0])
    'binary'
    >>> type_of_target([1, 0, 2])
    'multiclass'
    >>> type_of_target([1.0, 0.0, 3.0])
    'multiclass'
    >>> type_of_target(['a', 'b', 'c'])
    'multiclass'
    >>> type_of_target(np.array([[1, 2], [3, 1]]))
    'multiclass-multioutput'
    >>> type_of_target([[1, 2]])
    'multiclass-multioutput'
    >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
    'continuous-multioutput'
    >>> type_of_target(np.array([[0, 1], [1, 1]]))
    'multilabel-indicator'
    """
    # NOTE: the order of the checks below is load-bearing -- each test
    # assumes the earlier ones have already excluded their cases.
    valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
             and not isinstance(y, string_types))
    if not valid:
        raise ValueError('Expected array-like (array or non-string sequence), '
                         'got %r' % y)
    if is_multilabel(y):
        return 'multilabel-indicator'
    try:
        y = np.asarray(y)
    except ValueError:
        # Known to fail in numpy 1.3 for array of arrays
        return 'unknown'
    # The old sequence of sequences format
    try:
        if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
                and not isinstance(y[0], string_types)):
            raise ValueError('You appear to be using a legacy multi-label data'
                             ' representation. Sequence of sequences are no'
                             ' longer supported; use a binary array or sparse'
                             ' matrix instead.')
    except IndexError:
        # Empty y: fall through to the shape-based checks below.
        pass
    # Invalid inputs
    if y.ndim > 2 or (y.dtype == object and len(y) and
                      not isinstance(y.flat[0], string_types)):
        return 'unknown'  # [[[1, 2]]] or [obj_1] and not ["label_1"]
    if y.ndim == 2 and y.shape[1] == 0:
        return 'unknown'  # [[]]
    if y.ndim == 2 and y.shape[1] > 1:
        suffix = "-multioutput"  # [[1, 2], [1, 2]]
    else:
        suffix = ""  # [1, 2, 3] or [[1], [2], [3]]
    # check float and contains non-integer float values
    if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
        # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
        return 'continuous' + suffix
    if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
        return 'multiclass' + suffix  # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
    else:
        return 'binary'  # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
def class_distribution(y, sample_weight=None):
    """Compute class priors from multioutput-multiclass target data
    Parameters
    ----------
    y : array like or sparse matrix of size (n_samples, n_outputs)
        The labels for each example.
    sample_weight : array-like of shape = (n_samples,), optional
        Sample weights.
    Returns
    -------
    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.
    n_classes : list of integers of size n_outputs
        Number of classes in each column
    class_prior : list of size n_outputs of arrays of size (n_classes,)
        Class distribution of each column.
    """
    classes = []
    n_classes = []
    class_prior = []
    n_samples, n_outputs = y.shape
    if issparse(y):
        # CSC layout gives cheap per-output-column access to stored values
        y = y.tocsc()
        y_nnz = np.diff(y.indptr)  # number of explicitly stored entries per column
        for k in range(n_outputs):
            # row indices of the samples with an explicitly stored value in column k
            col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
            # separate sample weights for zero and non-zero elements
            if sample_weight is not None:
                nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
                zeros_samp_weight_sum = (np.sum(sample_weight) -
                                         np.sum(nz_samp_weight))
            else:
                nz_samp_weight = None
                zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
            classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
                                       return_inverse=True)
            class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
            if 0 in classes_k:
                class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
            if 0 not in classes_k and y_nnz[k] < y.shape[0]:
                classes_k = np.insert(classes_k, 0, 0)
                class_prior_k = np.insert(class_prior_k, 0,
                                          zeros_samp_weight_sum)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            # normalize weighted counts into a prior distribution
            class_prior.append(class_prior_k / class_prior_k.sum())
    else:
        # Dense path: one np.unique pass per output column
        for k in range(n_outputs):
            classes_k, y_k = np.unique(y[:, k], return_inverse=True)
            classes.append(classes_k)
            n_classes.append(classes_k.shape[0])
            class_prior_k = bincount(y_k, weights=sample_weight)
            class_prior.append(class_prior_k / class_prior_k.sum())
    return (classes, n_classes, class_prior)
|
bsd-3-clause
|
nishantnath/MusicPredictiveAnalysis_EE660_USCFall2015
|
Code/Machine_Learning_Algos/10k_Tests/ml_classification_kmeans_training.py
|
1
|
9358
|
# !/usr/bin/env python
__author__ = 'NishantNath'
'''
Using : Python 2.7+ (backward compatibility exists for Python 3.x if separate environment created)
Required files : hdf5_getters.py, find_second_max_value.py
Required packages : numpy, pandas, matplotlib, sklearn, pickle
Steps:
1.
# Uses k-means method for classification training
'''
import pandas
import matplotlib.pyplot as mpyplot
import pylab
import numpy
import sklearn
import pickle
import crop_rock
# [0: 'CLASSICAL', 1: 'METAL', 2: 'DANCE', 3: 'JAZZ']
# [4:'FOLK', 5: 'SOUL', 6: 'ROCK', 7: 'POP', 8: 'BLUES']
if __name__ == '__main__':
    # Train five K-means models (one per pre-pickled training split),
    # persist them with pickle, and print rough self-consistency stats.
    print '--- started ---'
    # Load the five pickled training splits (presumably pandas DataFrames
    # produced by an upstream script -- TODO confirm against the pickler)
    input1 = pickle.load(open("msd_train_t1.pkl", "rb"))
    input2 = pickle.load(open("msd_train_t2.pkl", "rb"))
    input3 = pickle.load(open("msd_train_t3.pkl", "rb"))
    input4 = pickle.load(open("msd_train_t4.pkl", "rb"))
    input5 = pickle.load(open("msd_train_t5.pkl", "rb"))
    # print input1.shape[0]
    # input = pickle.load(open("msd_train.pkl", "rb"))
    # Cap the dominant genre at the size of the second-largest one
    maxval1 = crop_rock.find_second_max_value(input1)
    maxval2 = crop_rock.find_second_max_value(input2)
    maxval3 = crop_rock.find_second_max_value(input3)
    maxval4 = crop_rock.find_second_max_value(input4)
    maxval5 = crop_rock.find_second_max_value(input5)
    # print maxval1
    # maxval = crop_rock.find_second_max_value(input)
    filtered1 = crop_rock.drop_excess_rows(input1, maxval1)
    filtered2 = crop_rock.drop_excess_rows(input2, maxval2)
    filtered3 = crop_rock.drop_excess_rows(input3, maxval3)
    filtered4 = crop_rock.drop_excess_rows(input4, maxval4)
    filtered5 = crop_rock.drop_excess_rows(input5, maxval5)
    # print filtered1.shape[0]
    # filtered = crop_rock.drop_excess_rows(input, maxval)
    # handling missing data: drop uncategorized tracks and rows with NaNs
    filtered1 = filtered1[filtered1['Genre']!='UNCAT']; filtered1 = filtered1.dropna()
    filtered2 = filtered2[filtered2['Genre']!='UNCAT']; filtered2 = filtered2.dropna()
    filtered3 = filtered3[filtered3['Genre']!='UNCAT']; filtered3 = filtered3.dropna()
    filtered4 = filtered4[filtered4['Genre']!='UNCAT']; filtered4 = filtered4.dropna()
    filtered5 = filtered5[filtered5['Genre']!='UNCAT']; filtered5 = filtered5.dropna()
    # print filtered1
    # filtered = filtered[filtered['Genre']!='UNCAT']; filtered.dropna()
    # range(2,76) means it goes from col 2 to col 75 (feature columns);
    # column 0 is the genre label
    df_input1_data = filtered1[list(range(2,76))].as_matrix()
    df_input1_target = filtered1[list(range(0,1))].as_matrix()
    df_input2_data = filtered2[list(range(2,76))].as_matrix()
    df_input2_target = filtered2[list(range(0,1))].as_matrix()
    df_input3_data = filtered3[list(range(2,76))].as_matrix()
    df_input3_target = filtered3[list(range(0,1))].as_matrix()
    df_input4_data = filtered4[list(range(2,76))].as_matrix()
    df_input4_target = filtered4[list(range(0,1))].as_matrix()
    df_input5_data = filtered5[list(range(2,76))].as_matrix()
    df_input5_target = filtered5[list(range(0,1))].as_matrix()
    # df_input_data = filtered[list(range(2,76))].as_matrix()
    # df_input_target = filtered[list(range(0,1))].as_matrix()
    # K-means clustering (NOTE(review): comment previously said
    # "Naive Gaussian Bayes" -- copy/paste leftover from a sibling script)
    from sklearn.cluster import KMeans
    from numpy.random import RandomState
    # Simple PCA
    from sklearn.decomposition import PCA
    pca1 = PCA(n_components=6) #from optimal pca components chart n_components=6
    pca2 = PCA(n_components=6) #from optimal pca components chart n_components=6
    pca3 = PCA(n_components=6) #from optimal pca components chart n_components=6
    pca4 = PCA(n_components=6) #from optimal pca components chart n_components=6
    pca5 = PCA(n_components=6) #from optimal pca components chart n_components=6
    # pca = PCA(n_components=6) #from optimal pca components chart n_components=6
    pca1.fit(df_input1_data)
    pca2.fit(df_input2_data)
    pca3.fit(df_input3_data)
    pca4.fit(df_input4_data)
    pca5.fit(df_input5_data)
    # pca.fit(df_input_data)
    # Reduced Feature Set
    df_input1_data = pca1.transform(df_input1_data)
    df_input2_data = pca2.transform(df_input2_data)
    df_input3_data = pca3.transform(df_input3_data)
    df_input4_data = pca4.transform(df_input4_data)
    df_input5_data = pca5.transform(df_input5_data)
    # df_input_data = pca.transform(df_input_data)
    # Fit and pickle one KMeans per split.
    # NOTE(review): KMeans.fit ignores the y argument (unsupervised);
    # passing the targets here has no effect.
    # NOTE(review): kmeans1 uses n_clusters=9 while the others use 5 --
    # confirm whether that asymmetry is intentional.
    kmeans1 = KMeans(n_clusters=9, random_state=RandomState(9))
    kmeans1.fit(df_input1_data,numpy.ravel(df_input1_target))
    pickle.dump(kmeans1, open('model_kmeans_t1.pkl', 'wb'))
    kmeans2 = KMeans(n_clusters=5, random_state=RandomState(9))
    kmeans2.fit(df_input2_data,numpy.ravel(df_input2_target))
    pickle.dump(kmeans2, open('model_kmeans_t2.pkl', 'wb'))
    kmeans3 = KMeans(n_clusters=5, random_state=RandomState(9))
    kmeans3.fit(df_input3_data,numpy.ravel(df_input3_target))
    pickle.dump(kmeans3, open('model_kmeans_t3.pkl', 'wb'))
    kmeans4 = KMeans(n_clusters=5, random_state=RandomState(9))
    kmeans4.fit(df_input4_data,numpy.ravel(df_input4_target))
    pickle.dump(kmeans4, open('model_kmeans_t4.pkl', 'wb'))
    kmeans5 = KMeans(n_clusters=5, random_state=RandomState(9))
    kmeans5.fit(df_input5_data,numpy.ravel(df_input5_target))
    pickle.dump(kmeans5, open('model_kmeans_t5.pkl', 'wb'))
    print kmeans1.labels_
    # kmeans = KMeans(n_clusters=5, random_state=RandomState(9)
    # kmeans.fit(df_input_data,numpy.ravel(df_input_target))
    # pickle.dump(kmeans, open('model_kmeans_train.pkl', 'wb'))
    predicted1 = kmeans1.predict(df_input1_data)
    predicted2 = kmeans2.predict(df_input2_data)
    predicted3 = kmeans3.predict(df_input3_data)
    predicted4 = kmeans4.predict(df_input4_data)
    predicted5 = kmeans5.predict(df_input5_data)
    # predicted = kmeans.predict(df_input_data)
    # NOTE(review): this compares raw cluster ids with genre labels; cluster
    # numbering is arbitrary, so without a cluster-to-genre mapping these
    # "matches" are not a meaningful accuracy measure -- confirm intent.
    matches1 = (predicted1 == [item for sublist in df_input1_target for item in sublist])
    matches2 = (predicted2 == [item for sublist in df_input2_target for item in sublist])
    matches3 = (predicted3 == [item for sublist in df_input3_target for item in sublist])
    matches4 = (predicted4 == [item for sublist in df_input4_target for item in sublist])
    matches5 = (predicted5 == [item for sublist in df_input5_target for item in sublist])
    # matches = (predicted == [item for sublist in df_input_target for item in sublist])
    print 'using excess rock & uncats removed'
    print predicted1, type(predicted1)
    print matches1, type(matches1)
    # print "Accuracy of T1 : ", (matches1.count() / float(len(matches1)))
    # print "Accuracy of T2 : ", (matches2.count() / float(len(matches2)))
    # print "Accuracy of T3 : ", (matches3.count() / float(len(matches3)))
    # print "Accuracy of T4 : ", (matches4.count() / float(len(matches4)))
    # print "Accuracy of T5 : ", (matches5.count() / float(len(matches5)))
    # # print "Accuracy of Training : ", (matches.count() / float(len(matches)))
    #
    # x1 = input1[input1['Genre']!='UNCAT'].dropna()
    # x_df_input1_data = x1[list(range(2,76))].as_matrix()
    # x_df_input1_target = x1[list(range(0,1))].as_matrix()
    #
    # x2 = input2[input2['Genre']!='UNCAT'].dropna()
    # x_df_input2_data = x2[list(range(2,76))].as_matrix()
    # x_df_input2_target = x2[list(range(0,1))].as_matrix()
    #
    # x3 = input3[input3['Genre']!='UNCAT'].dropna()
    # x_df_input3_data = x3[list(range(2,76))].as_matrix()
    # x_df_input3_target = x3[list(range(0,1))].as_matrix()
    #
    # x4 = input4[input4['Genre']!='UNCAT'].dropna()
    # x_df_input4_data = x4[list(range(2,76))].as_matrix()
    # x_df_input4_target = x4[list(range(0,1))].as_matrix()
    #
    # x5 = input5[input5['Genre']!='UNCAT'].dropna()
    # x_df_input5_data = x5[list(range(2,76))].as_matrix()
    # x_df_input5_target = x5[list(range(0,1))].as_matrix()
    #
    # # x = input[input['Genre']!='UNCAT'].dropna()
    # # x_df_input_data = x[list(range(2,76))].as_matrix()
    # # x_df_input_target = x[list(range(0,1))].as_matrix()
    #
    # predicted1 = kmeans1.predict(x_df_input1_data)
    # predicted2 = kmeans2.predict(x_df_input2_data)
    # predicted3 = kmeans3.predict(x_df_input3_data)
    # predicted4 = kmeans4.predict(x_df_input4_data)
    # predicted5 = kmeans5.predict(x_df_input5_data)
    # # predicted = kmeans.predict(x_df_input_data)
    #
    # matches1 = (predicted1 == [item for sublist in x_df_input1_target for item in sublist])
    # matches2 = (predicted2 == [item for sublist in x_df_input2_target for item in sublist])
    # matches3 = (predicted3 == [item for sublist in x_df_input3_target for item in sublist])
    # matches4 = (predicted4 == [item for sublist in x_df_input4_target for item in sublist])
    # matches5 = (predicted5 == [item for sublist in x_df_input5_target for item in sublist])
    # # matches = (predicted == [item for sublist in x_df_input_target for item in sublist])
    #
    # print 'using uncats removed'
    # print "Accuracy of T1 : ", (matches1.count() / float(len(matches1)))
    # print "Accuracy of T2 : ", (matches2.count() / float(len(matches2)))
    # print "Accuracy of T3 : ", (matches3.count() / float(len(matches3)))
    # print "Accuracy of T4 : ", (matches4.count() / float(len(matches4)))
    # print "Accuracy of T5 : ", (matches5.count() / float(len(matches5)))
    # # print "Accuracy of Training : ", (matches.count() / float(len(matches)))
    print '--- done ---'
|
mit
|
abelcarreras/DynaPhoPy
|
dynaphopy/power_spectrum/__init__.py
|
1
|
13388
|
import numpy as np
import sys
import matplotlib.pyplot as plt
from dynaphopy.power_spectrum import mem
from dynaphopy.power_spectrum import correlation
unit_conversion = 0.00010585723 # u * A^2 * THz -> eV*ps
def _progress_bar(progress, label):
bar_length = 30
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = 'Progress error\r\n'
if progress < 0:
progress = 0
status = 'Halt ...\r\n'
if progress >= 1:
progress = 1
status = 'Done...\r\n'
block = int(round(bar_length*progress))
text = '\r{0}: [{1}] {2:.2f}% {3}'.format(label, '#'*block + '-'*(bar_length-block),
progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
def _division_of_data(resolution, number_of_data, time_step):
piece_size = round(1./(time_step*resolution))
number_of_pieces = int((number_of_data-1)/piece_size)
if number_of_pieces > 0:
interval = (number_of_data - piece_size)/number_of_pieces
else:
interval = 0
number_of_pieces = 1
piece_size = number_of_data
pieces = []
for i in range(number_of_pieces+1):
ini = int((piece_size/2+i*interval)-piece_size/2)
fin = int((piece_size/2+i*interval)+piece_size/2)
pieces.append([ini, fin])
return pieces
#############################################
# Fourier transform - direct method #
#############################################
def get_fourier_direct_power_spectra(vq, trajectory, parameters):
    """Power spectral density via the direct Fourier/autocorrelation method.

    One spectrum is computed per column of vq (one column per mode --
    assumed; TODO confirm) on parameters.frequency_range, delegating the
    numerical work to correlation.correlation_par.  Returns an array of
    shape (n_frequencies, n_columns) scaled to eV*ps by unit_conversion.
    """
    test_frequency_range = np.array(parameters.frequency_range)
    psd_vector = []
    if not parameters.silent:
        _progress_bar(0, "Fourier")
    for i in range(vq.shape[1]):
        psd_vector.append(correlation.correlation_par(test_frequency_range,
                                                      vq[:, i],
                                                      # np.lib.pad(vq[:, i], (2500, 2500), 'constant'),
                                                      trajectory.get_time_step_average(),
                                                      step=parameters.correlation_function_step,
                                                      integration_method=parameters.integration_method))
        if not parameters.silent:
            _progress_bar(float(i + 1) / vq.shape[1], "Fourier")
    # stack per-mode spectra as columns
    psd_vector = np.array(psd_vector).T
    return psd_vector * unit_conversion
#####################################
# Maximum entropy method method #
#####################################
def get_mem_power_spectra(vq, trajectory, parameters):
    """Power spectral density via the maximum entropy method (mem module).

    One spectrum is computed per column of vq on parameters.frequency_range.
    Returns an array (n_frequencies, n_columns) scaled to eV*ps.
    """
    test_frequency_range = np.array(parameters.frequency_range)
    # Check number of coefficients
    # NOTE(review): print + exit() aborts the whole interpreter; raising
    # ValueError would be friendlier to library callers -- confirm intent.
    if vq.shape[0] <= parameters.number_of_coefficients_mem+1:
        print('Number of coefficients should be smaller than the number of time steps')
        exit()
    psd_vector = []
    if not parameters.silent:
        _progress_bar(0, 'M. Entropy')
    for i in range(vq.shape[1]):
        # ascontiguousarray: mem.mem presumably needs contiguous buffers
        # (compiled extension) -- TODO confirm
        psd_vector.append(mem.mem(np.ascontiguousarray(test_frequency_range),
                                  np.ascontiguousarray(vq[:, i]),
                                  trajectory.get_time_step_average(),
                                  coefficients=parameters.number_of_coefficients_mem))
        if not parameters.silent:
            _progress_bar(float(i + 1) / vq.shape[1], 'M. Entropy')
    # nan_to_num guards against numerical blow-ups in the MEM recursion
    psd_vector = np.nan_to_num(np.array(psd_vector).T)
    return psd_vector * unit_conversion
#####################################
# Coefficient analysis (MEM) #
#####################################
def mem_coefficient_scan_analysis(vq, trajectory, parameters):
    """For each mode (column of vq), scan the number of MEM coefficients,
    fit each resulting spectrum with the configured peak function, pick the
    coefficient count with the smallest global fitting error, and plot a
    per-peak summary (width vs. coefficients, fit error, best fit curve).
    """
    from dynaphopy.analysis.fitting import fitting_functions
    mem_full_dict = {}
    for i in range(vq.shape[1]):
        test_frequency_range = parameters.frequency_range
        fit_data = []
        scan_params = []
        power_spectra = []
        if not parameters.silent:
            _progress_bar(0, 'ME Coeff.')
        for number_of_coefficients in parameters.mem_scan_range:
            power_spectrum = mem.mem(np.ascontiguousarray(test_frequency_range),
                                     np.ascontiguousarray(vq[:, i]),
                                     trajectory.get_time_step_average(),
                                     coefficients=number_of_coefficients)
            power_spectrum *= unit_conversion
            # seed the peak fit from the spectrum's maximum
            guess_height = np.max(power_spectrum)
            guess_position = test_frequency_range[np.argmax(power_spectrum)]
            Fitting_function_class = fitting_functions.fitting_functions[parameters.fitting_function]
            if np.isnan(power_spectrum).any():
                print('Warning: power spectrum error, skipping point {0}'.format(number_of_coefficients))
                continue
            # Fitting_curve = fitting_functions[parameters.fitting_function]
            fitting_function = Fitting_function_class(test_frequency_range,
                                                      power_spectrum,
                                                      guess_height=guess_height,
                                                      guess_position=guess_position)
            fitting_parameters = fitting_function.get_fitting()
            if not fitting_parameters['all_good']:
                print('Warning: Fitting error, skipping point {0}'.format(number_of_coefficients))
                continue
            # frequency = fitting_parameters['peak_position']
            area = fitting_parameters['area']
            width = fitting_parameters['width']
            # base_line = fitting_parameters['base_line']
            maximum = fitting_parameters['maximum']
            error = fitting_parameters['global_error']
            fit_data.append([number_of_coefficients, width, error, area])
            scan_params.append(fitting_function._fit_params)
            power_spectra.append(power_spectrum)
            if not(parameters.silent):
                # NOTE(review): label differs from the initial 'ME Coeff.'
                # bar, and the fraction assumes mem_scan_range starts near 0
                _progress_bar(float(number_of_coefficients + 1) / parameters.mem_scan_range[-1], "M.E. Method")
        fit_data = np.array(fit_data).T
        if fit_data.size == 0:
            # every scan point failed for this mode: nothing to report
            continue
        # width averaged over the scan, weighted by inverse sqrt of fit error
        best_width = np.average(fit_data[1], weights=np.sqrt(1./fit_data[2]))
        best_index = int(np.argmin(fit_data[2]))
        power_spectrum = power_spectra[best_index]
        mem_full_dict.update({i: [power_spectrum, best_width, best_index, fit_data, scan_params]})
    # Second pass: textual report + diagnostic plots per successful peak
    for i in range(vq.shape[1]):
        if not i in mem_full_dict.keys():
            continue
        print ('Peak # {0}'.format(i+1))
        print('------------------------------------')
        print ('Estimated width : {0} THz'.format(mem_full_dict[i][1]))
        fit_data = mem_full_dict[i][3]
        scan_params = mem_full_dict[i][4]
        best_index = mem_full_dict[i][2]
        print ('Position (best fit): {0} THz'.format(scan_params[best_index][0]))
        print ('Area (best fit): {0} eV'.format(fit_data[3][best_index]))
        print ('Coefficients num (best fit): {0}'.format(fit_data[0][best_index]))
        print ('Fitting global error (best fit): {0}'.format(fit_data[2][best_index]))
        print ("\n")
        plt.figure(i+1)
        plt.suptitle('Peak {0}'.format(i+1))
        ax1 = plt.subplot2grid((2, 2), (0, 0), colspan=2)
        ax2 = plt.subplot2grid((2, 2), (1, 0))
        ax3 = plt.subplot2grid((2, 2), (1, 1))
        ax1.set_xlabel('Number of coefficients')
        ax1.set_ylabel('Width [THz]')
        ax1.set_title('Peak width')
        ax1.plot(fit_data[0], fit_data[1])
        ax1.plot((fit_data[0][0], fit_data[0][-1]), (mem_full_dict[i][1], mem_full_dict[i][1]), 'k-')
        ax2.set_xlabel('Number of coefficients')
        ax2.set_ylabel('(Global error)^-1')
        ax2.set_title('Fitting error')
        ax2.plot(fit_data[0], np.sqrt(1./fit_data[2]))
        ax3.set_xlabel('Frequency [THz]')
        ax3.set_title('Best curve fitting')
        ax3.plot(test_frequency_range, mem_full_dict[i][0], label='Power spectrum')
        # NOTE(review): fitting_function here is the one left over from the
        # last loop iteration above; only its class/_function matter for the
        # plotted curve -- confirm this is intended.
        ax3.plot(test_frequency_range,
                 fitting_function._function(test_frequency_range, *scan_params[best_index]),
                 label='{} fit'.format(fitting_function.curve_name))
    plt.show()
#####################################
# FFT method (NUMPY) #
#####################################
def _numpy_power(frequency_range, data, time_step):
    """Single-signal power spectrum: autocorrelate each data window, FFT it
    with numpy, average the window spectra, and interpolate the result onto
    the requested frequency grid.
    """
    windows = _division_of_data(frequency_range[1] - frequency_range[0],
                                data.size,
                                time_step)
    spectra = []
    for start, end in windows:
        piece = data[start:end]
        # normalized autocorrelation of the window
        piece = np.correlate(piece, piece, mode='same') / piece.size
        spectra.append(np.abs(np.fft.fft(piece)) * time_step)
    averaged = np.average(spectra, axis=0)
    # map FFT bins onto the requested grid (all windows share one size)
    freqs = np.fft.fftfreq(piece.size, time_step)
    order = np.argsort(freqs)
    return np.interp(frequency_range, freqs[order], averaged[order])
def get_fft_numpy_spectra(vq, trajectory, parameters):
    """Per-column power spectra using numpy's FFT (via _numpy_power).

    Warns when the requested frequency resolution is finer than what the
    trajectory length (plus zero padding) can deliver.  Returns an array
    (n_frequencies, n_columns) scaled to eV*ps.
    """
    test_frequency_range = np.array(parameters.frequency_range)
    requested_resolution = test_frequency_range[1]-test_frequency_range[0]
    # best achievable resolution = 1 / total simulated time span
    maximum_resolution = 1./(trajectory.get_time_step_average()*(vq.shape[0]+parameters.zero_padding))
    if requested_resolution < maximum_resolution:
        print('Power spectrum resolution requested unavailable, using maximum: {0:9.6f} THz'.format(maximum_resolution))
        print('If you need higher resolution increase the number of data')
    psd_vector = []
    if not(parameters.silent):
        _progress_bar(0, 'FFT')
    for i in range(vq.shape[1]):
        psd_vector.append(_numpy_power(test_frequency_range, vq[:, i],
                                       trajectory.get_time_step_average()),
                          )
        if not(parameters.silent):
            _progress_bar(float(i + 1) / vq.shape[1], 'FFT')
    psd_vector = np.array(psd_vector).T
    return psd_vector * unit_conversion
#####################################
# FFT method (FFTW) #
#####################################
def _fftw_power(frequency_range, data, time_step):
    """Same windowed autocorrelation+FFT estimator as _numpy_power, but the
    FFT is delegated to pyFFTW with one thread per CPU core.

    pyfftw is imported lazily so the module loads without it installed.
    """
    import pyfftw
    from multiprocessing import cpu_count
    pieces = _division_of_data(frequency_range[1] - frequency_range[0],
                               data.size,
                               time_step)
    ps = []
    for i_p in pieces:
        data_piece = data[i_p[0]:i_p[1]]
        # normalized autocorrelation of the window
        data_piece = np.correlate(data_piece, data_piece, mode='same') / data_piece.size
        ps.append(np.abs(pyfftw.interfaces.numpy_fft.fft(data_piece, threads=cpu_count()))*time_step)
    ps = np.average(ps,axis=0)
    # interpolate FFT bins onto the requested frequency grid
    freqs = np.fft.fftfreq(data_piece.size, time_step)
    idx = np.argsort(freqs)
    return np.interp(frequency_range, freqs[idx], ps[idx])
def get_fft_fftw_power_spectra(vq, trajectory, parameters):
    """Per-column power spectra using the FFTW backend (_fftw_power).

    Returns an array (n_frequencies, n_columns) scaled to eV*ps.
    """
    test_frequency_range = np.array(parameters.frequency_range)
    psd_vector = []
    if not(parameters.silent):
        _progress_bar(0, 'FFTW')
    for i in range(vq.shape[1]):
        psd_vector.append(_fftw_power(test_frequency_range, vq[:, i],
                                      trajectory.get_time_step_average()),
                          )
        if not(parameters.silent):
            _progress_bar(float(i + 1) / vq.shape[1], 'FFTW')
    psd_vector = np.array(psd_vector).T
    return psd_vector * unit_conversion
#####################################
# FFT method (CUDA) #
#####################################
def _cuda_power(frequency_range, data, time_step):
    """Same windowed estimator as _numpy_power, with the autocorrelation and
    FFT offloaded to the GPU via the optional cuda_functions package
    (imported lazily so the module loads without it installed).
    """
    from cuda_functions import cuda_acorrelate, cuda_fft
    pieces = _division_of_data(frequency_range[1] - frequency_range[0],
                               data.size,
                               time_step)
    ps = []
    for i_p in pieces:
        data_piece = data[i_p[0]:i_p[1]]
        data_piece = cuda_acorrelate(data_piece, mode='same')/data_piece.size
        ps.append(np.abs(cuda_fft(data_piece)*time_step))
    ps = np.average(ps,axis=0)
    # interpolate FFT bins onto the requested frequency grid
    freqs = np.fft.fftfreq(data_piece.size, time_step)
    idx = np.argsort(freqs)
    return np.interp(frequency_range, freqs[idx], ps[idx])
def get_fft_cuda_power_spectra(vq, trajectory, parameters):
    """Per-column power spectra using the CUDA backend (_cuda_power).

    Returns an array (n_frequencies, n_columns) scaled to eV*ps.
    """
    test_frequency_range = np.array(parameters.frequency_range)
    psd_vector = []
    if not(parameters.silent):
        _progress_bar(0, 'CUDA')
    for i in range(vq.shape[1]):
        psd_vector.append(_cuda_power(test_frequency_range, vq[:, i],
                                      trajectory.get_time_step_average()),
                          )
        if not(parameters.silent):
            _progress_bar(float(i + 1) / vq.shape[1], 'CUDA')
    psd_vector = np.array(psd_vector).T
    return psd_vector * unit_conversion
#######################
# Functions summary #
#######################
# Maps the numeric method index used elsewhere in dynaphopy to a
# [implementation, human-readable name] pair.
power_spectrum_functions = {
    0: [get_fourier_direct_power_spectra, 'Fourier transform'],
    1: [get_mem_power_spectra, 'Maximum entropy method'],
    2: [get_fft_numpy_spectra, 'Fast Fourier transform (Numpy)'],
    3: [get_fft_fftw_power_spectra, 'Fast Fourier transform (FFTW)'],
    4: [get_fft_cuda_power_spectra, 'Fast Fourier transform (CUDA)']
}
|
mit
|
CforED/Machine-Learning
|
examples/bicluster/plot_spectral_biclustering.py
|
403
|
2011
|
"""
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
# (n_row_clusters, n_column_clusters) of the generated checkerboard
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
    shape=(300, 300), n_clusters=n_clusters, noise=10,
    shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
# shuffle rows/columns, keeping the permutations to score against later
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
                             random_state=0)
model.fit(data)
# similarity between recovered biclusters and the ground truth (1.0 = perfect)
score = consensus_score(model.biclusters_,
                        (rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
# reorder rows/columns by cluster label so the biclusters become visible
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
# outer product of sorted labels reveals the checkerboard block structure
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
                     np.sort(model.column_labels_) + 1),
            cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
|
bsd-3-clause
|
trungnt13/scikit-learn
|
sklearn/linear_model/tests/test_perceptron.py
|
378
|
1815
|
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
# Shared fixtures: iris shuffled with a fixed seed so the perceptron tests
# below are deterministic; a CSR copy exercises the sparse code path.
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
    """Minimal reference perceptron used to cross-check sklearn's version.

    Expects +1/-1 labels; the decision function is sign(w . x + b).
    """

    def __init__(self, n_iter=1):
        # number of full passes over the training set
        self.n_iter = n_iter

    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0
        for _ in range(self.n_iter):
            for idx in range(n_samples):
                # classic perceptron rule: update on every misclassification
                if self.predict(X[idx])[0] != y[idx]:
                    self.w += y[idx] * X[idx]
                    self.b += y[idx]

    def project(self, X):
        # signed distance (up to scale) from the decision hyperplane
        return np.dot(X, self.w) + self.b

    def predict(self, X):
        return np.sign(self.project(np.atleast_2d(X)))
def test_perceptron_accuracy():
    # Perceptron should reach at least 70% training accuracy on iris,
    # for both the dense and the sparse (CSR) input path.
    for data in (X, X_csr):
        clf = Perceptron(n_iter=30, shuffle=False)
        clf.fit(data, y)
        score = clf.score(data, y)
        assert_true(score >= 0.7)
def test_perceptron_correctness():
    # With shuffling disabled, sklearn's Perceptron must reproduce the
    # weight vector of the reference MyPerceptron implementation exactly.
    y_bin = y.copy()
    y_bin[y != 1] = -1  # binarize: class 1 vs. the rest
    clf1 = MyPerceptron(n_iter=2)
    clf1.fit(X, y_bin)
    clf2 = Perceptron(n_iter=2, shuffle=False)
    clf2.fit(X, y_bin)
    assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
    # Perceptron has no probabilistic output; accessing these attributes
    # must raise AttributeError rather than silently exist.
    clf = Perceptron()
    for meth in ("predict_proba", "predict_log_proba"):
        assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
|
bsd-3-clause
|
Djabbz/scikit-learn
|
sklearn/datasets/mldata.py
|
309
|
7838
|
"""Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
    """Normalize a raw mldata.org dataset name into its filename form.

    Lowercases the name, turns spaces into dashes, and strips
    parentheses and dots.
    """
    normalized = dataname.lower().replace(' ', '-')
    return re.sub(r'[().]', '', normalized)
def fetch_mldata(dataname, target_name='label', data_name='data',
                 transpose_data=True, data_home=None):
    """Fetch an mldata.org data set
    If the file does not exist yet, it is downloaded from mldata.org .
    mldata.org does not have an enforced convention for storing data or
    naming the columns in a data set. The default behavior of this function
    works well with the most common cases:
    1) data values are stored in the column 'data', and target values in the
    column 'label'
    2) alternatively, the first column stores target values, and the second
    data values
    3) the data array is stored as `n_features x n_samples` , and thus needs
    to be transposed to match the `sklearn` standard
    Keyword arguments allow to adapt these defaults to specific data sets
    (see parameters `target_name`, `data_name`, `transpose_data`, and
    the examples below).
    mldata.org data sets may have multiple columns, which are stored in the
    Bunch object with their original name.
    Parameters
    ----------
    dataname:
        Name of the data set on mldata.org,
        e.g.: "leukemia", "Whistler Daily Snowfall", etc.
        The raw name is automatically converted to a mldata.org URL .
    target_name: optional, default: 'label'
        Name or index of the column containing the target values.
    data_name: optional, default: 'data'
        Name or index of the column containing the data.
    transpose_data: optional, default: True
        If True, transpose the downloaded data array.
    data_home: optional, default: None
        Specify another download and cache folder for the data sets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.
    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'target', the classification labels,
        'DESCR', the full description of the dataset, and
        'COL_NAMES', the original names of the dataset columns.
    Examples
    --------
    Load the 'iris' dataset from mldata.org:
    >>> from sklearn.datasets.mldata import fetch_mldata
    >>> import tempfile
    >>> test_data_home = tempfile.mkdtemp()
    >>> iris = fetch_mldata('iris', data_home=test_data_home)
    >>> iris.target.shape
    (150,)
    >>> iris.data.shape
    (150, 4)
    Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respects the sklearn axes convention:
    >>> leuk = fetch_mldata('leukemia', transpose_data=True,
    ...                     data_home=test_data_home)
    >>> leuk.data.shape
    (72, 7129)
    Load an alternative 'iris' dataset, which has different names for the
    columns:
    >>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
    ...                      data_name=0, data_home=test_data_home)
    >>> iris3 = fetch_mldata('datasets-UCI iris',
    ...                      target_name='class', data_name='double0',
    ...                      data_home=test_data_home)
    >>> import shutil
    >>> shutil.rmtree(test_data_home)
    """
    # normalize dataset name
    dataname = mldata_filename(dataname)
    # check if this data set has been already downloaded
    data_home = get_data_home(data_home=data_home)
    data_home = join(data_home, 'mldata')
    if not exists(data_home):
        os.makedirs(data_home)
    matlab_name = dataname + '.mat'
    filename = join(data_home, matlab_name)
    # if the file does not exist, download it
    if not exists(filename):
        urlname = MLDATA_BASE_URL % quote(dataname)
        try:
            mldata_url = urlopen(urlname)
        except HTTPError as e:
            # give a clearer message for missing datasets, re-raise otherwise
            if e.code == 404:
                e.msg = "Dataset '%s' not found on mldata.org." % dataname
            raise
        # store Matlab file
        try:
            with open(filename, 'w+b') as matlab_file:
                copyfileobj(mldata_url, matlab_file)
        except:
            # NOTE(review): bare except (catches KeyboardInterrupt too) --
            # apparently deliberate so a partial download never survives on
            # disk; the original exception is always re-raised.
            os.remove(filename)
            raise
        mldata_url.close()
    # load dataset matlab file
    with open(filename, 'rb') as matlab_file:
        matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
    # -- extract data from matlab_dict
    # flatten column names
    col_names = [str(descr[0])
                 for descr in matlab_dict['mldata_descr_ordering'][0]]
    # if target or data names are indices, transform them into names
    if isinstance(target_name, numbers.Integral):
        target_name = col_names[target_name]
    if isinstance(data_name, numbers.Integral):
        data_name = col_names[data_name]
    # rules for making sense of the mldata.org data format
    # (earlier ones have priority):
    # 1) there is only one array => it is "data"
    # 2) there are multiple arrays
    #    a) copy all columns in the bunch, using their column name
    #    b) if there is a column called `target_name`, set "target" to it,
    #       otherwise set "target" to first column
    #    c) if there is a column called `data_name`, set "data" to it,
    #       otherwise set "data" to second column
    dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
               'COL_NAMES': col_names}
    # 1) there is only one array => it is considered data
    if len(col_names) == 1:
        data_name = col_names[0]
        dataset['data'] = matlab_dict[data_name]
    # 2) there are multiple arrays
    else:
        for name in col_names:
            dataset[name] = matlab_dict[name]
        if target_name in col_names:
            del dataset[target_name]
            dataset['target'] = matlab_dict[target_name]
        else:
            del dataset[col_names[0]]
            dataset['target'] = matlab_dict[col_names[0]]
        if data_name in col_names:
            del dataset[data_name]
            dataset['data'] = matlab_dict[data_name]
        else:
            del dataset[col_names[1]]
            dataset['data'] = matlab_dict[col_names[1]]
    # set axes to sklearn conventions
    if transpose_data:
        dataset['data'] = dataset['data'].T
    if 'target' in dataset:
        if not sp.sparse.issparse(dataset['target']):
            dataset['target'] = dataset['target'].squeeze()
    return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
    # setup mock urllib2 module to avoid downloading from mldata.org
    # (nose module-level fixture: runs once before this module's doctests,
    # registering fake payloads for every dataset the docstrings fetch)
    from sklearn.utils.testing import install_mldata_mock
    install_mldata_mock({
        'iris': {
            'data': np.empty((150, 4)),
            'label': np.empty(150),
        },
        'datasets-uci-iris': {
            'double0': np.empty((150, 4)),
            'class': np.empty((150,)),
        },
        'leukemia': {
            'data': np.empty((72, 7129)),
        },
    })
def teardown_module(module):
    # nose module-level fixture: restore real network access after doctests
    from sklearn.utils.testing import uninstall_mldata_mock
    uninstall_mldata_mock()
|
bsd-3-clause
|
xavierwu/scikit-learn
|
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
|
286
|
2378
|
"""
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
|
bsd-3-clause
|
eternallyBaffled/itrade
|
itrade_wxprint.py
|
1
|
11931
|
#!/usr/bin/env python
# ============================================================================
# Project Name : iTrade
# Module Name : itrade_wxprint.py
#
# Description: wxPython Printing Back-End
#
# The Original Code is iTrade code (http://itrade.sourceforge.net).
#
# The Initial Developer of the Original Code is Gilles Dumortier.
#
# Portions created by the Initial Developer are Copyright (C) 2004-2008 the
# Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see http://www.gnu.org/licenses/gpl.html
#
# History Rev Description
# 2007-01-27 dgil Wrote it from scratch
# ============================================================================
# ============================================================================
# Imports
# ============================================================================
# python system
import logging
# iTrade system
import itrade_config
from itrade_logging import *
from itrade_local import message,getLang
# wxPython system
if not itrade_config.nowxversion:
import itrade_wxversion
import wx
# matplotlib system
import matplotlib.backends.backend_wxagg
from matplotlib.backends.backend_wx import RendererWx
# ============================================================================
# CanvasPrintout
# ============================================================================
class CanvasPrintout(wx.Printout):
    """wx.Printout that renders a matplotlib wx canvas onto a printer DC.

    Only single-page output is supported (see HasPage/GetPageInfo).
    """
    def __init__(self, canvas):
        wx.Printout.__init__(self,title='Graph')
        # canvas: a matplotlib wx canvas (must expose .figure, .bitmap,
        # .draw() and a .m_parent with drawAllObjects()).
        self.canvas = canvas
        # width, in inches of output figure (approximate)
        self.width = 5
        self.margin = 0.2
    def HasPage(self, page):
        #current only supports 1 page print
        return page == 1
    def GetPageInfo(self):
        # (min page, max page, page-from, page-to): a single page.
        return (1, 1, 1, 1)
    def OnPrintPage(self, page):
        """Render the canvas figure at printer resolution onto the DC."""
        self.canvas.draw()
        dc = self.GetDC()
        (ppw,pph) = self.GetPPIPrinter()  # printer's pixels per in
        (pgw,pgh) = self.GetPageSizePixels()  # page size in pixels
        (dcw,dch) = dc.GetSize()
        (grw,grh) = self.canvas.GetSizeTuple()
        # save current figure dpi resolution and bg color,
        # so that we can temporarily set them to the dpi of
        # the printer, and the bg color to white
        bgcolor = self.canvas.figure.get_facecolor()
        fig_dpi = self.canvas.figure.dpi.get()
        # draw the bitmap, scaled appropriately
        vscale = float(ppw) / fig_dpi
        # set figure resolution,bg color for printer
        self.canvas.figure.dpi.set(ppw)
        self.canvas.figure.set_facecolor('#FFFFFF')
        # NOTE(review): .dpi.get()/.dpi.set() is the old matplotlib API where
        # figure.dpi was a value object — confirm against the pinned
        # matplotlib version before upgrading.
        renderer = RendererWx(self.canvas.bitmap, self.canvas.figure.dpi)
        self.canvas.figure.draw(renderer)
        self.canvas.bitmap.SetWidth( int(self.canvas.bitmap.GetWidth() * vscale))
        self.canvas.bitmap.SetHeight( int(self.canvas.bitmap.GetHeight()* vscale))
        self.canvas.draw()
        # page may need additional scaling on preview
        page_scale = 1.0
        if self.IsPreview(): page_scale = float(dcw)/pgw
        # get margin in pixels = (margin in in) * (pixels/in)
        top_margin = int(self.margin * pph * page_scale)
        left_margin = int(self.margin * ppw * page_scale)
        # set scale so that width of output is self.width inches
        # (assuming grw is size of graph in inches....)
        user_scale = (self.width * fig_dpi * page_scale)/float(grw)
        dc.SetDeviceOrigin(left_margin,top_margin)
        dc.SetUserScale(user_scale,user_scale)
        # this cute little number avoid API inconsistencies in wx
        # (older wx takes (bitmap, x, y), some versions take (bitmap, (x, y)))
        try:
            dc.DrawBitmap(self.canvas.bitmap, 0, 0)
        except:
            try:
                dc.DrawBitmap(self.canvas.bitmap, (0, 0))
            except:
                pass
        # Let the parent draw its overlay objects on top of the figure.
        self.canvas.m_parent.drawAllObjects(dc)
        # restore original figure resolution
        self.canvas.figure.set_facecolor(bgcolor)
        self.canvas.figure.dpi.set(fig_dpi)
        # __x self.canvas.m_parent.draw()
        return True
# ============================================================================
class MyPrintout(wx.Printout):
    """wx.Printout that blits a window's device context, scaled and centered.

    Reports two pages (GetPageInfo) but draws the same canvas content on each.
    The info() calls trace the printing life-cycle to the iTrade log.
    """
    def __init__(self, canvas):
        wx.Printout.__init__(self)
        # canvas: any wx window exposing GetSizeTuple() and GetDC().
        self.m_canvas = canvas
    def OnBeginDocument(self, start, end):
        info("MyPrintout.OnBeginDocument\n")
        # NOTE(review): base_* delegation is the old wxPython (pre-2.9) idiom;
        # newer wx expects super calls — confirm against the pinned wx version.
        self.base_OnBeginDocument(start,end)
    def OnEndDocument(self):
        info("MyPrintout.OnEndDocument\n")
        self.base_OnEndDocument()
    def OnBeginPrinting(self):
        info("MyPrintout.OnBeginPrinting\n")
        self.base_OnBeginPrinting()
    def OnEndPrinting(self):
        info("MyPrintout.OnEndPrinting\n")
        self.base_OnEndPrinting()
    def OnPreparePrinting(self):
        info("MyPrintout.OnPreparePrinting\n")
        self.base_OnPreparePrinting()
    def HasPage(self, page):
        info("MyPrintout.HasPage: %d\n" % page)
        if page <= 2:
            return True
        else:
            return False
    def GetPageInfo(self):
        info("MyPrintout.GetPageInfo\n")
        # (min page, max page, page-from, page-to): two pages.
        return (1, 2, 1, 2)
    def OnPrintPage(self, page):
        """Scale the canvas to fit the printer DC, center it, and blit it."""
        info("MyPrintout.OnPrintPage: %d\n" % page)
        dc = self.GetDC()
        #-------------------------------------------
        # One possible method of setting scaling factors...
        width,height = self.m_canvas.GetSizeTuple()
        maxX,maxY = width,height
        # Let's have at least 50 device units margin
        marginX = 50
        marginY = 50
        # Add the margin to the graphic size
        maxX = maxX + (2 * marginX)
        maxY = maxY + (2 * marginY)
        # Get the size of the DC in pixels
        (w, h) = dc.GetSizeTuple()
        # Calculate a suitable scaling factor
        scaleX = float(w) / maxX
        scaleY = float(h) / maxY
        # Use x or y scaling factor, whichever fits on the DC
        actualScale = min(scaleX, scaleY)
        # Calculate the position on the DC for centering the graphic
        posX = (w - (width * actualScale)) / 2.0
        posY = (h - (height * actualScale)) / 2.0
        # Set the scale and origin
        dc.SetUserScale(actualScale, actualScale)
        dc.SetDeviceOrigin(int(posX), int(posY))
        #-------------------------------------------
        # Copy the canvas's own DC onto the (scaled) printer DC.
        pandc = self.m_canvas.GetDC()
        sz = pandc.GetSizeTuple()
        dc.Blit(0,0, sz[0], sz[1], 0, 0, pandc)
        #dc.DrawText(message('print_page') % page, marginX/2, maxY-marginY)
        return True
# ============================================================================
# iTrade_wxPanelPrint
#
# m_parent parent panel or frame or window
# m_pd PrintData
# ============================================================================
class iTrade_wxPanelPrint(object):
    """Mixin adding page-setup / preview / print actions to a wx panel.

    Subclasses must also derive from a wx window and define self.m_canvas
    (the widget handed to the printout factory) before the print handlers
    are invoked — see MyTestPanel below for the expected usage.
    """
    def __init__(self, parent , po, orientation = wx.PORTRAIT):
        # parent: parent panel/frame/window; po: printout factory class
        # (called as po(canvas)), e.g. MyPrintout or CanvasPrintout.
        self.m_parent = parent
        self.m_pd = wx.PrintData()
        # Default paper size follows the user's locale: US Letter vs A4.
        if getLang()=='us':
            self.m_pd.SetPaperId(wx.PAPER_LETTER)
        else:
            self.m_pd.SetPaperId(wx.PAPER_A4)
        self.m_pd.SetOrientation(orientation)
        self.m_pd.SetPrintMode(wx.PRINT_MODE_PRINTER)
        self.m_po = po
    def OnPageSetup(self, evt):
        """Show the page-setup dialog and keep the resulting print data."""
        psdd = wx.PageSetupDialogData(self.m_pd)
        psdd.CalculatePaperSizeFromId()
        dlg = wx.PageSetupDialog(self, psdd)
        dlg.CentreOnParent()
        dlg.ShowModal()
        # this makes a copy of the wx.PrintData instead of just saving
        # a reference to the one inside the PrintDialogData that will
        # be destroyed when the dialog is destroyed
        self.m_pd = wx.PrintData( dlg.GetPageSetupData().GetPrintData() )
        dlg.Destroy()
    def OnPrintPreview(self, event):
        """Open a preview frame sized/positioned like the parent window."""
        data = wx.PrintDialogData(self.m_pd)
        # Two printout instances: wx.PrintPreview needs one for previewing
        # and one for the actual print started from the preview frame.
        printout = self.m_po(self.m_canvas)
        printout2 = self.m_po(self.m_canvas)
        self.preview = wx.PrintPreview(printout, printout2, data)
        if not self.preview.Ok():
            info("Houston, we have a problem...\n")
            return
        # be sure to have a Frame object
        frameInst = self
        while not isinstance(frameInst, wx.Frame):
            frameInst = frameInst.GetParent()
        # create the Frame for previewing
        pfrm = wx.PreviewFrame(self.preview, frameInst, message('print_preview'))
        pfrm.Initialize()
        pfrm.SetPosition(self.m_parent.GetPosition())
        pfrm.SetSize(self.m_parent.GetSize())
        pfrm.Show(True)
    def OnDoPrint(self, event):
        """Run the print job; on success, keep the (possibly edited) settings."""
        pdd = wx.PrintDialogData(self.m_pd)
        pdd.SetToPage(2)
        printer = wx.Printer(pdd)
        printout = self.m_po(self.m_canvas)
        if not printer.Print(self.m_parent, printout, True):
            # Print() returns False both on error and on user cancel; the
            # error dialog below is intentionally disabled.
            #wx.MessageBox(message('print_errprinting'), message('print_printing'), wx.OK)
            pass
        else:
            self.m_pd = wx.PrintData( printer.GetPrintDialogData().GetPrintData() )
        printout.Destroy()
# ============================================================================
# Test me
# ============================================================================
class MyTestPanel(wx.Panel,iTrade_wxPanelPrint):
    """Demo panel: an HTML canvas plus Page Setup / Preview / Print buttons."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, -1, wx.DefaultPosition, wx.DefaultSize)
        # MyPrintout is the printout factory used by the print handlers.
        iTrade_wxPanelPrint.__init__(self,parent,MyPrintout)
        self.box = wx.BoxSizer(wx.VERTICAL)
        from itrade_wxhtml import iTradeHtmlPanel
        # m_canvas is required by the iTrade_wxPanelPrint mixin.
        self.m_canvas = iTradeHtmlPanel(self,wx.NewId(),"http://www.google.fr")
        self.m_canvas.paint0()
        self.box.Add(self.m_canvas, 1, wx.GROW)
        # Row of action buttons wired to the mixin's handlers.
        subbox = wx.BoxSizer(wx.HORIZONTAL)
        btn = wx.Button(self, -1, "Page Setup")
        self.Bind(wx.EVT_BUTTON, self.OnPageSetup, btn)
        subbox.Add(btn, 1, wx.GROW | wx.ALL, 2)
        btn = wx.Button(self, -1, "Print Preview")
        self.Bind(wx.EVT_BUTTON, self.OnPrintPreview, btn)
        subbox.Add(btn, 1, wx.GROW | wx.ALL, 2)
        btn = wx.Button(self, -1, "Print")
        self.Bind(wx.EVT_BUTTON, self.OnDoPrint, btn)
        subbox.Add(btn, 1, wx.GROW | wx.ALL, 2)
        self.box.Add(subbox, 0, wx.GROW)
        self.SetAutoLayout(True)
        self.SetSizer(self.box)
class MyTestFrame(wx.Frame):
    """Top-level frame hosting the print/preview demo panel."""
    def __init__(self, parent, id):
        wx.Frame.__init__(self, parent, id, "iTrade Print and Preview Module", wx.Point(10,10), wx.Size(400, 400))
        self.panel = MyTestPanel(self)
        # Old-style (classic wxPython) event binding.
        wx.EVT_CLOSE(self, self.OnCloseWindow)
    def OnCloseWindow(self, event):
        self.Destroy()
class MyTestApp(wx.App):
    """Minimal wx application that drives the print/preview demo frame."""
    def OnInit(self):
        """Create and show the demo frame; wx invokes this at startup."""
        main_frame = MyTestFrame(None, -1)
        main_frame.Show(True)
        self.SetTopWindow(main_frame)
        return True
if __name__=='__main__':
    # Standalone test mode: verbose logging plus the demo app.
    # setLevel comes from the `from itrade_logging import *` at file top.
    setLevel(logging.INFO)
    app = MyTestApp(0)
    app.MainLoop()
# ============================================================================
# That's all folks !
# ============================================================================
|
gpl-3.0
|
rsignell-usgs/PySeidon
|
pyseidon/utilities/interpolation_utils.py
|
2
|
9712
|
#!/usr/bin/python2.7
# encoding: utf-8
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as Tri
import matplotlib.ticker as ticker
from matplotlib.path import Path
from scipy.spatial import KDTree
def closest_point( pt_lon, pt_lat, lon, lat, debug=False):
    '''
    Finds the closest exact lon, lat centre indexes of an FVCOM class
    to given lon, lat coordinates.
    Inputs:
      - pt_lon = list of longitudes in degrees to find
      - pt_lat = list of latitudes in degrees to find
      - lon = list of longitudes in degrees to search in
      - lat = list of latitudes in degrees to search in
    Outputs:
      - closest_point_indexes = numpy array of grid indexes, one per
        (pt_lon, pt_lat) query point
    Notes:
      Distance is squared Euclidean in raw degree space — no great-circle
      or latitude correction is applied.
    '''
    if debug:
        print 'Computing closest_point_indexes...'
    lonc=lon[:]
    latc=lat[:]
    # points: (n_queries, 2); point_list: (n_grid, 2)
    points = np.array([pt_lon, pt_lat]).T
    point_list = np.array([lonc, latc]).T
    #closest_dist = (np.square((point_list[:, 0] - points[:, 0, None])) +
    #                np.square((point_list[:, 1] - points[:, 1, None])))
    #closest_point_indexes = np.argmin(closest_dist, axis=1)
    #Wesley's optimized version of this bottleneck
    # Same math as above, but hoisting the column views and multiplying
    # instead of np.square avoids temporaries in the broadcasted expression.
    point_list0 = point_list[:, 0]
    points0 = points[:, 0, None]
    point_list1 = point_list[:, 1]
    points1 = points[:, 1, None]
    closest_dist = ((point_list0 - points0) *
                    (point_list0 - points0) +
                    (point_list1 - points1) *
                    (point_list1 - points1)
                    )
    # argmin along the grid axis: nearest grid index per query point.
    closest_point_indexes = np.argmin(closest_dist, axis=1)
    #Thomas' optimized version of this bottleneck
    #closest_point_indexes = np.zeros(points.shape[0])
    #for i in range(points.shape[0]):
    #    dist=((point_list-points[i,:])**2).sum(axis=1)
    #    ndx = d.argsort()
    #    closest_point_indexes[i] = ndx[0]
    if debug:
        print 'Closest dist: ', closest_dist
    if debug:
        print 'closest_point_indexes', closest_point_indexes
        print '...Passed'
    return closest_point_indexes
def interpN_at_pt(var, pt_x, pt_y, xc, yc, index, trinodes,
                  aw0, awx, awy, debug=False):
    """
    Interpol node variable any given variables at any give location.

    Linear (first-order Taylor) interpolation of a node-based variable
    around the centre of element `index`, using the FVCOM shape-function
    coefficients aw0/awx/awy: value(pt) = var0 + varX*dx + varY*dy.

    Inputs:
      - var = variable, numpy array, dim=(node) or (time, node) or (time, level, node)
      - pt_x = x coordinate in m to find
      - pt_y = y coordinate in m to find
      - xc = list of x coordinates of var, numpy array, dim= ele
      - yc = list of y coordinates of var, numpy array, dim= ele
      - trinodes = FVCOM trinodes, numpy array, dim=(3,nele)
      - index = index of the nearest element
      - aw0, awx, awy = grid parameters
    Outputs:
      - varInterp = var interpolate at (pt_lon, pt_lat)
    """
    if debug:
        print 'Interpolating at node...'
    # The three node indices of element `index`.
    n1 = int(trinodes[index,0])
    n2 = int(trinodes[index,1])
    n3 = int(trinodes[index,2])
    # Offsets from the element centre to the query point.
    x0 = pt_x - xc[index]
    y0 = pt_y - yc[index]
    # Branch on array rank so the node axis is always the last one;
    # the three branches compute the same weighted sums.
    if len(var.shape)==1:
        var0 = (aw0[0,index] * var[n1]) \
             + (aw0[1,index] * var[n2]) \
             + (aw0[2,index] * var[n3])
        varX = (awx[0,index] * var[n1]) \
             + (awx[1,index] * var[n2]) \
             + (awx[2,index] * var[n3])
        varY = (awy[0,index] * var[n1]) \
             + (awy[1,index] * var[n2]) \
             + (awy[2,index] * var[n3])
    elif len(var.shape)==2:
        var0 = (aw0[0,index] * var[:,n1]) \
             + (aw0[1,index] * var[:,n2]) \
             + (aw0[2,index] * var[:,n3])
        varX = (awx[0,index] * var[:,n1]) \
             + (awx[1,index] * var[:,n2]) \
             + (awx[2,index] * var[:,n3])
        varY = (awy[0,index] * var[:,n1]) \
             + (awy[1,index] * var[:,n2]) \
             + (awy[2,index] * var[:,n3])
    else:
        var0 = (aw0[0,index] * var[:,:,n1]) \
             + (aw0[1,index] * var[:,:,n2]) \
             + (aw0[2,index] * var[:,:,n3])
        varX = (awx[0,index] * var[:,:,n1]) \
             + (awx[1,index] * var[:,:,n2]) \
             + (awx[2,index] * var[:,:,n3])
        varY = (awy[0,index] * var[:,:,n1]) \
             + (awy[1,index] * var[:,:,n2]) \
             + (awy[2,index] * var[:,:,n3])
    # First-order expansion around the element centre.
    varPt = var0 + (varX * x0) + (varY * y0)
    if debug:
        if len(var.shape)==1:
            zi = varPt
            print 'Varpt: ', zi
        print '...Passed'
    #TR comment: squeeze seems to resolve my problem with pydap
    return varPt.squeeze()
def interpE_at_pt(var, pt_x, pt_y, xc, yc, index, triele, trinodes,
                  a1u, a2u, debug=False):
    """
    Interpol node variable any given variables at any give location.

    Linear interpolation of an element-based variable at a point inside
    element `index`, using element-gradient coefficients a1u/a2u over the
    element and its three neighbours: value(pt) = var[index] + ∇var · (dx, dy).

    Inputs:
      - var = variable, numpy array, dim=(nele) or (time, nele) or (time, level, nele)
      - pt_x = x coordinate in m to find
      - pt_y = y coordinate in m to find
      - xc = list of x coordinates of var, numpy array, dim= nele
      - yc = list of y coordinates of var, numpy array, dim= nele
      - triele = FVCOM triele, numpy array, dim=(3,nele)
      - trinodes = FVCOM trinodes, numpy array, dim=(3,nele)
      - index = index of the nearest element
      - a1u, a2u = grid parameters
    Outputs:
      - varInterp = var interpolate at (pt_lon, pt_lat)
    """
    if debug:
        print 'Interpolating at element...'
    # Indices of the three neighbouring elements.
    n1 = int(triele[index,0])
    n2 = int(triele[index,1])
    n3 = int(triele[index,2])
    #TR comment: not quiet sure what this step does
    # NOTE(review): 0 appears to be a boundary sentinel remapped to an
    # out-of-range-looking index (trinodes.shape[1]) — confirm against the
    # FVCOM grid convention before relying on this.
    if n1==0: n1 = trinodes.shape[1]
    if n2==0: n2 = trinodes.shape[1]
    if n3==0: n3 = trinodes.shape[1]
    #TR quick fix: due to error with pydap.proxy.ArrayProxy
    #              not able to cop with numpy.int
    n1 = int(n1)
    n2 = int(n2)
    n3 = int(n3)
    # Offsets from the element centre to the query point.
    x0 = pt_x - xc[index]
    y0 = pt_y - yc[index]
    # Branch on array rank; each branch builds the x/y gradient from the
    # element itself plus its three neighbours, then expands linearly.
    if len(var.shape)==1:
        dvardx = (a1u[0,index] * var[index]) \
               + (a1u[1,index] * var[n1]) \
               + (a1u[2,index] * var[n2]) \
               + (a1u[3,index] * var[n3])
        dvardy = (a2u[0,index] * var[index]) \
               + (a2u[1,index] * var[n1]) \
               + (a2u[2,index] * var[n2]) \
               + (a2u[3,index] * var[n3])
        varPt = var[index] + (dvardx * x0) + (dvardy * y0)
    elif len(var.shape)==2:
        dvardx = (a1u[0,index] * var[:,index]) \
               + (a1u[1,index] * var[:,n1]) \
               + (a1u[2,index] * var[:,n2]) \
               + (a1u[3,index] * var[:,n3])
        dvardy = (a2u[0,index] * var[:,index]) \
               + (a2u[1,index] * var[:,n1]) \
               + (a2u[2,index] * var[:,n2]) \
               + (a2u[3,index] * var[:,n3])
        varPt = var[:,index] + (dvardx * x0) + (dvardy * y0)
    else:
        dvardx = (a1u[0,index] * var[:,:,index]) \
               + (a1u[1,index] * var[:,:,n1]) \
               + (a1u[2,index] * var[:,:,n2]) \
               + (a1u[3,index] * var[:,:,n3])
        dvardy = (a2u[0,index] * var[:,:,index]) \
               + (a2u[1,index] * var[:,:,n1]) \
               + (a2u[2,index] * var[:,:,n2]) \
               + (a2u[3,index] * var[:,:,n3])
        varPt = var[:,:,index] + (dvardx * x0) + (dvardy * y0)
    if debug:
        if len(var.shape)==1:
            zi = varPt
            print 'Varpt: ', zi
        print '...Passed'
    #TR comment: squeeze seems to resolve my problem with pydap
    return varPt.squeeze()
def interp_at_point(var, pt_lon, pt_lat, lon, lat,
                    index=[], trinodes=[], tri=[], debug=False):
    """
    Interpol any given variables at any give location.

    Builds (or reuses) a single-triangle matplotlib Triangulation from the
    three nodes of element `index` and linearly interpolates `var` inside it.

    Inputs:
    ------
      - var = variable, numpy array, dim=(time, nele or node)
      - pt_lon = longitude in degrees to find
      - pt_lat = latitude in degrees to find
      - lon = list of longitudes of var, numpy array, dim=(nele or node)
      - lat = list of latitudes of var, numpy array, dim=(nele or node)
      - trinodes = FVCOM trinodes, numpy array, dim=(3,nele)
      - tri = optional pre-built Triangulation to reuse (mutable-default
        [] acts only as a "not provided" sentinel here; it is never mutated)
    Outputs:
      - varInterp = var interpolate at (pt_lon, pt_lat)
    """
    if debug:
        print 'Interpolating at point...'
    #Finding the right indexes
    #Triangulation
    #if debug:
    #    print triIndex, lon[triIndex], lat[triIndex]
    # The three node indices of the enclosing element.
    triIndex = trinodes[index]
    if tri==[]:
        tri = Tri.Triangulation(lon[triIndex], lat[triIndex], np.array([[0,1,2]]))
    trif = tri.get_trifinder()
    # NOTE(review): the trifinder result is discarded — this call has no
    # effect on the output; possibly leftover from a containment check.
    trif.__call__(pt_lon, pt_lat)
    if debug:
        if len(var.shape)==1:
            averEl = var[triIndex]
            print 'Var', averEl
            inter = Tri.LinearTriInterpolator(tri, averEl)
            zi = inter(pt_lon, pt_lat)
            print 'zi', zi
    #Choose the right interpolation depending on the variable
    # 1-D: single interpolation; 2-D/3-D: one interpolator per time (and
    # per level), since LinearTriInterpolator takes 1-D node values.
    if len(var.shape)==1:
        triVar = np.zeros(triIndex.shape)
        triVar = var[triIndex]
        inter = Tri.LinearTriInterpolator(tri, triVar[:])
        varInterp = inter(pt_lon, pt_lat)
    elif len(var.shape)==2:
        triVar = np.zeros((var.shape[0], triIndex.shape[0]))
        triVar = var[:, triIndex]
        varInterp = np.ones(triVar.shape[0])
        for i in range(triVar.shape[0]):
            inter = Tri.LinearTriInterpolator(tri, triVar[i,:])
            varInterp[i] = inter(pt_lon, pt_lat)
    else:
        triVar = np.zeros((var.shape[0], var.shape[1], triIndex.shape[0]))
        triVar = var[:, :, triIndex]
        varInterp = np.ones(triVar.shape[:-1])
        for i in range(triVar.shape[0]):
            for j in range(triVar.shape[1]):
                inter = Tri.LinearTriInterpolator(tri, triVar[i,j,:])
                varInterp[i,j] = inter(pt_lon, pt_lat)
    if debug:
        print '...Passed'
    #TR comment: squeeze seems to resolve my problem with pydap
    return varInterp.squeeze()
|
agpl-3.0
|
antoinearnoud/openfisca-france-indirect-taxation
|
openfisca_france_indirect_taxation/build_survey_data/step_1_2_imputations_loyers_proprietaires.py
|
4
|
8619
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import os
from ConfigParser import SafeConfigParser
import logging
import pandas
from openfisca_survey_manager.temporary import temporary_store_decorator
from openfisca_survey_manager import default_config_files_directory as config_files_directory
from openfisca_survey_manager.survey_collections import SurveyCollection
log = logging.getLogger(__name__)
# **************************************************************************************************************************
# * Etape n° 0-1-2 : IMPUTATION DE LOYERS POUR LES MENAGES PROPRIETAIRES
# **************************************************************************************************************************
@temporary_store_decorator(config_files_directory = config_files_directory, file_name = 'indirect_taxation_tmp')
def build_imputation_loyers_proprietaires(temporary_store = None, year = None):
    """Build menage consumption by categorie fiscale dataframe.

    Imputes rents for owner-occupier households and merges them, as pseudo
    COICOP expenditure, into the 'depenses_{year}' table of the temporary
    store, saving the result as 'depenses_bdf_{year}'.

    For 1995 the imputation comes from a hot-deck file; for 2000/2005/2011
    the INSEE-provided imputed rents from the menage table are used instead.
    """
    assert temporary_store is not None
    assert year is not None
    # Load data
    bdf_survey_collection = SurveyCollection.load(collection = 'budget_des_familles',
        config_files_directory = config_files_directory)
    survey = bdf_survey_collection.get_survey('budget_des_familles_{}'.format(year))
    if year == 1995:
        imput00 = survey.get_values(table = "socioscm")
        # keep only rows whose quality and veracity are assured:
        # exdep = 1 when household expenditure data are properly filled
        # exrev = 1 when household income data are properly filled
        imput00 = imput00[(imput00.exdep == 1) & (imput00.exrev == 1)]
        # NOTE(review): the line below repeats the filter above — redundant
        # but harmless.
        imput00 = imput00[(imput00.exdep == 1) & (imput00.exrev == 1)]
        kept_variables = ['mena', 'stalog', 'surfhab', 'confort1', 'confort2', 'confort3', 'confort4',
            'ancons', 'sitlog', 'nbphab', 'rg', 'cc']
        imput00 = imput00[kept_variables]
        imput00.rename(columns = {'mena': 'ident_men'}, inplace = True)
        #TODO: continue variable cleaning
        var_to_filnas = ['surfhab']
        for var_to_filna in var_to_filnas:
            imput00[var_to_filna] = imput00[var_to_filna].fillna(0)
        var_to_ints = ['sitlog', 'confort1', 'stalog', 'surfhab', 'ident_men', 'ancons', 'nbphab']
        for var_to_int in var_to_ints:
            imput00[var_to_int] = imput00[var_to_int].astype(int)
        # Attach actual rent (COICOP 04110) and weights from the expenses table.
        depenses = temporary_store['depenses_{}'.format(year)]
        depenses.reset_index(inplace = True)
        depenses_small = depenses[['ident_men', '04110', 'pondmen']].copy()
        depenses_small.ident_men = depenses_small.ident_men.astype('int')
        imput00 = depenses_small.merge(imput00, on = 'ident_men').set_index('ident_men')
        imput00.rename(columns = {'04110': 'loyer_reel'}, inplace = True)
        # indicator: rent is observed and the occupant is a tenant
        # (stalog 3/4 per the BdF housing-status codes)
        imput00['observe'] = (imput00.loyer_reel > 0) & (imput00.stalog.isin([3, 4]))
        imput00['maison_appart'] = imput00.sitlog == 1
        # Surface-area category 1..8, one per threshold crossed.
        imput00['catsurf'] = (
            1 +
            (imput00.surfhab > 15) +
            (imput00.surfhab > 30) +
            (imput00.surfhab > 40) +
            (imput00.surfhab > 60) +
            (imput00.surfhab > 80) +
            (imput00.surfhab > 100) +
            (imput00.surfhab > 150)
            )
        assert imput00.catsurf.isin(range(1, 9)).all()
        # TODO: check what is done here, notably the catsurf = 2 value omitted
        # in the original stata code
        # NOTE(review): `imput00.maison = ...` is attribute assignment, not a
        # column assignment, and each line overwrites the previous one — only
        # the last expression survives. Likely should be
        # imput00['maison'] combined across conditions; confirm against the
        # original stata code.
        imput00.maison = 1 - ((imput00.cc == 5) & (imput00.catsurf == 1) & (imput00.maison_appart == 1))
        imput00.maison = 1 - ((imput00.cc == 5) & (imput00.catsurf == 3) & (imput00.maison_appart == 1))
        imput00.maison = 1 - ((imput00.cc == 5) & (imput00.catsurf == 8) & (imput00.maison_appart == 1))
        imput00.maison = 1 - ((imput00.cc == 4) & (imput00.catsurf == 1) & (imput00.maison_appart == 1))
        # Load the hot-deck imputation results, preferably from the assets
        # directory configured in config.ini, else from the survey itself.
        try:
            parser = SafeConfigParser()
            config_local_ini = os.path.join(config_files_directory, 'config_local.ini')
            config_ini = os.path.join(config_files_directory, 'config.ini')
            parser.read([config_ini, config_local_ini])
            directory_path = os.path.normpath(
                parser.get("openfisca_france_indirect_taxation", "assets")
                )
            hotdeck = pandas.read_stata(os.path.join(directory_path, 'hotdeck_result.dta'))
        except:  # NOTE(review): bare except — hides any failure mode, not just a missing file.
            hotdeck = survey.get_values(table = 'hotdeck_result')
        imput00.reset_index(inplace = True)
        hotdeck.ident_men = hotdeck.ident_men.astype('int')
        imput00 = imput00.merge(hotdeck, on = 'ident_men')
        # Households with an observed rent get no imputation.
        # NOTE(review): chained assignment — relies on this returning a view;
        # pandas may emit SettingWithCopyWarning here.
        imput00.loyer_impute[imput00.observe] = 0
        imput00.reset_index(inplace = True)
        loyers_imputes = imput00[['ident_men', 'loyer_impute']].copy()
        assert loyers_imputes.loyer_impute.notnull().all()
        loyers_imputes.rename(columns = dict(loyer_impute = '0411'), inplace = True)
    # FOR BdF 2000 AND 2005, USE THE IMPUTED RENTS COMPUTED BY INSEE
    if year == 2000:
        # Keep the imputed rents (available in the households table)
        loyers_imputes = survey.get_values(table = "menage", variables = ['ident', 'rev81'])
        loyers_imputes.rename(
            columns = {
                'ident': 'ident_men',
                'rev81': 'poste_coicop_421',
                },
            inplace = True,
            )
    if year == 2005:
        # Keep the imputed rents (available in the households table)
        loyers_imputes = survey.get_values(table = "menage")
        kept_variables = ['ident_men', 'rev801_d']
        loyers_imputes = loyers_imputes[kept_variables]
        loyers_imputes.rename(columns = {'rev801_d': 'poste_coicop_421'}, inplace = True)
    if year == 2011:
        # Table name casing differs between survey builds.
        try:
            loyers_imputes = survey.get_values(table = "MENAGE")
        except:
            loyers_imputes = survey.get_values(table = "menage")
        kept_variables = ['ident_me', 'rev801']
        loyers_imputes = loyers_imputes[kept_variables]
        loyers_imputes.rename(columns = {'rev801': 'poste_coicop_421', 'ident_me': 'ident_men'},
            inplace = True)
    # Join to the expenditure-by-COICOP table
    loyers_imputes.set_index('ident_men', inplace = True)
    temporary_store['loyers_imputes_{}'.format(year)] = loyers_imputes
    depenses = temporary_store['depenses_{}'.format(year)]
    # Align index dtypes before checking the one-to-one match.
    depenses.index = depenses.index.astype('int64')
    loyers_imputes.index = loyers_imputes.index.astype('int64')
    assert set(depenses.index) == set(loyers_imputes.index)
    assert len(set(depenses.columns).intersection(set(loyers_imputes.columns))) == 0
    depenses = depenses.merge(loyers_imputes, left_index = True, right_index = True)
    # ****************************************************************************************************************
    # Step 0-1-3: SAVE THE HOMOGENIZED EXPENDITURE TABLES IN THE RIGHT PLACE
    # ****************************************************************************************************************
    # Save in temporary store
    temporary_store['depenses_bdf_{}'.format(year)] = depenses
if __name__ == '__main__':
    # Standalone run: build the 1995 imputation and log the elapsed time.
    import sys
    import time
    logging.basicConfig(level = logging.INFO, stream = sys.stdout)
    # NOTE(review): time.clock() is deprecated (removed in Python 3.8);
    # fine for this Python 2 codebase, use time.time()/perf_counter() if ported.
    deb = time.clock()
    year = 1995
    build_imputation_loyers_proprietaires(year = year)
    log.info("step 0_1_2_build_imputation_loyers_proprietaires duration is {}".format(time.clock() - deb))
|
agpl-3.0
|
infilect/ml-course1
|
deep-learning-tensorflow/week2/deconvolution_segmentation/convolutional_autoencoder.py
|
2
|
15056
|
import math
import os
import time
from math import ceil
import cv2
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_nn_ops
from imgaug import augmenters as iaa
from imgaug import imgaug
from libs.activations import lrelu
from libs.utils import corrupt
from conv2d import Conv2d
from max_pool_2d import MaxPool2d
import datetime
import io
np.set_printoptions(threshold=np.nan)
class Network:
    """Convolutional autoencoder for 128x128 grayscale image segmentation.

    Builds a symmetric encoder/decoder TF1 graph: the given Conv2d/MaxPool2d
    layers form the encoder, and the same layer list, reversed, builds the
    decoder via each layer's create_layer_reversed(). Output is a sigmoid
    map trained with an RMSE loss against a binary target mask.
    """
    # image height, width, channels hard coded
    # NOTE(review): the comment above says RGB but IMAGE_CHANNELS is 1
    # (grayscale) — the dataset loader below also loads grayscale.
    IMAGE_HEIGHT = 128
    IMAGE_WIDTH = 128
    IMAGE_CHANNELS = 1
    def __init__(self, layers = None, per_image_standardization=True, batch_norm=True, skip_connections=True):
        # NOTE(review): batch_norm is accepted but never used in this body.
        # Define network - ENCODER (decoder will be symmetric).
        if layers == None:
            layers = []
            layers.append(Conv2d(kernel_size=7, strides=[1, 2, 2, 1], output_channels=64, name='conv_1_1'))
            layers.append(Conv2d(kernel_size=7, strides=[1, 1, 1, 1], output_channels=64, name='conv_1_2'))
            layers.append(MaxPool2d(kernel_size=2, name='max_1', skip_connection=True and skip_connections))
            layers.append(Conv2d(kernel_size=7, strides=[1, 2, 2, 1], output_channels=64, name='conv_2_1'))
            layers.append(Conv2d(kernel_size=7, strides=[1, 1, 1, 1], output_channels=64, name='conv_2_2'))
            layers.append(MaxPool2d(kernel_size=2, name='max_2', skip_connection=True and skip_connections))
            layers.append(Conv2d(kernel_size=7, strides=[1, 2, 2, 1], output_channels=64, name='conv_3_1'))
            layers.append(Conv2d(kernel_size=7, strides=[1, 1, 1, 1], output_channels=64, name='conv_3_2'))
            layers.append(MaxPool2d(kernel_size=2, name='max_3'))
        self.inputs = tf.placeholder(tf.float32, [None, self.IMAGE_HEIGHT, self.IMAGE_WIDTH, self.IMAGE_CHANNELS],
                                     name='inputs')
        # when output image has multiple dimensions, and each pixel has one of n class predictions
        #self.targets = tf.placeholder(tf.int32, [None, self.IMAGE_HEIGHT, self.IMAGE_WIDTH, self.IMAGE_CHANNELS], name='targets')
        self.targets = tf.placeholder(tf.float32, [None, self.IMAGE_HEIGHT, self.IMAGE_WIDTH, 1], name='targets')
        self.is_training = tf.placeholder_with_default(False, [], name='is_training')
        self.description = ""
        self.layers = {}
        # Optional per-image normalization applied before the encoder.
        if per_image_standardization:
            list_of_images_norm = tf.map_fn(tf.image.per_image_standardization, self.inputs)
            net = tf.stack(list_of_images_norm)
        else:
            net = self.inputs
        # ENCODER
        for layer in layers:
            # layer is Conv2d type, Conv2d has create_layer method
            # note how we are passing output of graph net as input to next layer
            self.layers[layer.name] = net = layer.create_layer(net)
            self.description += "{}".format(layer.get_description())
            print("Current input shape: ", net.get_shape())
        # just reversing the array
        layers.reverse()
        Conv2d.reverse_global_variables()
        # DECODER
        for layer in layers:
            # use prev_layer to reverse; prev_layer is the matching encoder
            # output saved above (used e.g. for skip connections).
            net = layer.create_layer_reversed(net, prev_layer=self.layers[layer.name])
        self.segmentation_result = tf.sigmoid(net)
        # cross entropy loss
        # can't use cross entropy since output image is not image mask, rather an image with float values
        print('segmentation_result.shape: {}, targets.shape: {}'.format(self.segmentation_result.get_shape(),
                                                                        self.targets.get_shape()))
        # targets_as_classes = tf.reshape(self.targets, [-1, self.IMAGE_HEIGHT, self.IMAGE_WIDTH])
        #self.cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.segmentation_result, labels=targets_as_classes))
        # MSE loss (actually RMSE: sqrt of the mean squared error)
        self.cost = tf.sqrt(tf.reduce_mean(tf.square(self.segmentation_result - self.targets)))
        self.train_op = tf.train.AdamOptimizer().minimize(self.cost)
        with tf.name_scope('accuracy'):
            # Round the sigmoid output to {0, 1} and compare with the mask.
            argmax_probs = tf.round(self.segmentation_result)  # 0x1
            correct_pred = tf.cast(tf.equal(argmax_probs, self.targets), tf.float32)
            self.accuracy = tf.reduce_mean(correct_pred)
            tf.summary.scalar('accuracy', self.accuracy)
        self.summaries = tf.summary.merge_all()
class Dataset:
    """Loads the face-segmentation image pairs and serves shuffled mini-batches.

    Expects ``<folder>/inputs`` plus either ``<folder>/targets`` (with hair)
    or ``<folder>/targets_face_only`` for the binary masks.
    """
    def __init__(self, batch_size, folder='data128_128', include_hair=False):
        self.batch_size = batch_size
        self.include_hair = include_hair
        all_files = os.listdir(os.path.join(folder, 'inputs'))
        # NOTE(review): the validation split is computed but never loaded.
        train_files, validation_files, test_files = self.train_valid_test_split(all_files)
        self.train_inputs, self.train_targets = self.file_paths_to_images(folder, train_files)
        self.test_inputs, self.test_targets = self.file_paths_to_images(folder, test_files, True)
        self.pointer = 0
    def file_paths_to_images(self, folder, files_list, verbose=False):
        """Read grayscale inputs and binarised target masks for *files_list*."""
        inputs, targets = [], []
        target_dir = 'targets' if self.include_hair else 'targets_face_only'
        for name in files_list:
            grayscale = np.array(cv2.imread(os.path.join(folder, 'inputs', name), 0))  # load grayscale
            inputs.append(grayscale)
            mask = cv2.imread(os.path.join(folder, target_dir, name), 0)
            # Binarise: pixels above 127 become 1, everything else 0.
            mask = cv2.threshold(mask, 127, 1, cv2.THRESH_BINARY)[1]
            targets.append(mask)
        return inputs, targets
    def train_valid_test_split(self, X, ratio=None):
        """Split *X* into (train, validation, test) slices by *ratio*."""
        if ratio is None:
            ratio = (0.7, .15, .15)
        N = len(X)
        first_cut = int(ceil(N * ratio[0]))
        second_cut = int(ceil(N * ratio[0] + N * ratio[1]))
        return (X[:first_cut], X[first_cut:second_cut], X[second_cut:])
    def num_batches_in_epoch(self):
        """Number of full batches available per epoch (remainder dropped)."""
        return int(math.floor(len(self.train_inputs) / self.batch_size))
    def reset_batch_pointer(self):
        """Shuffle inputs and targets in unison and rewind the batch pointer."""
        order = np.random.permutation(len(self.train_inputs))
        self.train_inputs = [self.train_inputs[i] for i in order]
        self.train_targets = [self.train_targets[i] for i in order]
        self.pointer = 0
    def next_batch(self):
        """Return the next (inputs, targets) mini-batch as uint8 arrays."""
        start = self.pointer
        batch_inputs = [np.array(self.train_inputs[start + i]) for i in range(self.batch_size)]
        batch_targets = [np.array(self.train_targets[start + i]) for i in range(self.batch_size)]
        self.pointer += self.batch_size
        return np.array(batch_inputs, dtype=np.uint8), np.array(batch_targets, dtype=np.uint8)
    @property
    def test_set(self):
        """The full held-out test split as uint8 arrays."""
        return np.array(self.test_inputs, dtype=np.uint8), np.array(self.test_targets, dtype=np.uint8)
def draw_results(test_inputs, test_targets, test_segmentation, test_accuracy, network, batch_num):
    """Plot inputs, targets and predictions side by side.

    Saves the figure to ``image_plots/figure<batch_num>.jpg`` and returns the
    same figure as an in-memory PNG buffer (for a TF image summary).
    """
    n_examples_to_plot = 12
    fig, axs = plt.subplots(4, n_examples_to_plot, figsize=(n_examples_to_plot * 3, 10))
    fig.suptitle("Accuracy: {}, {}".format(test_accuracy, network.description), fontsize=20)
    for col in range(n_examples_to_plot):
        # Row 0: raw input, row 1: ground-truth mask, row 2: raw prediction.
        axs[0][col].imshow(test_inputs[col], cmap='gray')
        axs[1][col].imshow(test_targets[col].astype(np.float32), cmap='gray')
        prediction = np.reshape(test_segmentation[col], [network.IMAGE_HEIGHT, network.IMAGE_WIDTH])
        axs[2][col].imshow(prediction, cmap='gray')
        # Row 3: prediction thresholded at 0.5 into a hard 0/255 mask.
        thresholded = np.array(
            [0 if x < 0.5 else 255 for x in test_segmentation[col].flatten()])
        axs[3][col].imshow(
            np.reshape(thresholded, [network.IMAGE_HEIGHT, network.IMAGE_WIDTH]), cmap='gray')
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    IMAGE_PLOT_DIR = 'image_plots/'
    if not os.path.exists(IMAGE_PLOT_DIR):
        os.makedirs(IMAGE_PLOT_DIR)
    plt.savefig('{}/figure{}.jpg'.format(IMAGE_PLOT_DIR, batch_num))
    return buf
def train():
    """Train the segmentation network with augmented batches.

    Periodically evaluates on the test split, writes TensorBoard summaries
    (scalar accuracy plus a rendered prediction plot), and checkpoints the
    model whenever the test accuracy reaches a new best.
    """
    BATCH_SIZE = 100
    # define the network first
    network = Network()
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
    # create directory for saving models
    os.makedirs(os.path.join('save', network.description, timestamp))
    # Dataset class is defined above; folder name encodes the image size.
    dataset = Dataset(folder='data{}_{}'.format(network.IMAGE_HEIGHT, network.IMAGE_WIDTH), include_hair=False,
                      batch_size=BATCH_SIZE)
    inputs, targets = dataset.next_batch()
    print(inputs.shape, targets.shape)
    # imgaug pipeline applied to inputs; masks only get the geometric subset
    # (crop/flip/affine) via the hooks defined below.
    augmentation_seq = iaa.Sequential([
        iaa.Crop(px=(0, 16), name="Cropper"),  # crop images from each side by 0 to 16px (randomly chosen)
        iaa.Fliplr(0.5, name="Flipper"),
        iaa.GaussianBlur((0, 3.0), name="GaussianBlur"),
        iaa.Dropout(0.02, name="Dropout"),
        iaa.AdditiveGaussianNoise(scale=0.01 * 255, name="GaussianNoise"),
        iaa.Affine(translate_px={"x": (-network.IMAGE_HEIGHT // 3, network.IMAGE_WIDTH // 3)}, name="Affine")
    ])
    # change the activated augmenters for binary masks,
    # we only want to execute horizontal crop, flip and affine transformation
    def activator_binmasks(images, augmenter, parents, default):
        if augmenter.name in ["GaussianBlur", "Dropout", "GaussianNoise"]:
            return False
        else:
            # default value for all other augmenters
            return default
    hooks_binmasks = imgaug.HooksImages(activator=activator_binmasks)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        summary_writer = tf.summary.FileWriter('{}/{}-{}'.format('logs', network.description, timestamp),
                                               graph=tf.get_default_graph())
        # NOTE(review): tf.all_variables() is deprecated TF1 API (alias of
        # tf.global_variables()); kept as-is for compatibility.
        saver = tf.train.Saver(tf.all_variables(), max_to_keep=None)
        test_accuracies = []
        # Fit all training data
        n_epochs = 5 #500
        global_start = time.time()
        for epoch_i in range(n_epochs):
            dataset.reset_batch_pointer()
            for batch_i in range(dataset.num_batches_in_epoch()):
                # 1-based global step across all epochs.
                batch_num = epoch_i * dataset.num_batches_in_epoch() + batch_i + 1
                # Freeze the random augmentation so inputs and masks get the
                # same geometric transforms.
                augmentation_seq_deterministic = augmentation_seq.to_deterministic()
                start = time.time()
                batch_inputs, batch_targets = dataset.next_batch()
                batch_inputs = np.reshape(batch_inputs,
                                          (dataset.batch_size, network.IMAGE_HEIGHT, network.IMAGE_WIDTH, 1))
                batch_targets = np.reshape(batch_targets,
                                           (dataset.batch_size, network.IMAGE_HEIGHT, network.IMAGE_WIDTH, 1))
                batch_inputs = augmentation_seq_deterministic.augment_images(batch_inputs)
                batch_inputs = np.multiply(batch_inputs, 1.0 / 255)
                batch_targets = augmentation_seq_deterministic.augment_images(batch_targets, hooks=hooks_binmasks)
                cost, _ = sess.run([network.cost, network.train_op],
                                   feed_dict={network.inputs: batch_inputs, network.targets: batch_targets,
                                              network.is_training: True})
                end = time.time()
                print('{}/{}, epoch: {}, cost: {}, batch time: {}'.format(batch_num,
                                                                          n_epochs * dataset.num_batches_in_epoch(),
                                                                          epoch_i, cost, end - start))
                # Evaluate every 100 steps and on the very last step.
                if batch_num % 100 == 0 or batch_num == n_epochs * dataset.num_batches_in_epoch():
                    test_inputs, test_targets = dataset.test_set
                    # test_inputs, test_targets = test_inputs[:100], test_targets[:100]
                    test_inputs = np.reshape(test_inputs, (-1, network.IMAGE_HEIGHT, network.IMAGE_WIDTH, 1))
                    test_targets = np.reshape(test_targets, (-1, network.IMAGE_HEIGHT, network.IMAGE_WIDTH, 1))
                    test_inputs = np.multiply(test_inputs, 1.0 / 255)
                    print(test_inputs.shape)
                    summary, test_accuracy = sess.run([network.summaries, network.accuracy],
                                                      feed_dict={network.inputs: test_inputs,
                                                                 network.targets: test_targets,
                                                                 network.is_training: False})
                    summary_writer.add_summary(summary, batch_num)
                    print('Step {}, test accuracy: {}'.format(batch_num, test_accuracy))
                    test_accuracies.append((test_accuracy, batch_num))
                    print("Accuracies in time: ", [test_accuracies[x][0] for x in range(len(test_accuracies))])
                    # max over (accuracy, batch_num) tuples -> best accuracy.
                    max_acc = max(test_accuracies)
                    print("Best accuracy: {} in batch {}".format(max_acc[0], max_acc[1]))
                    print("Total time: {}".format(time.time() - global_start))
                    # Plot example reconstructions
                    n_examples = 12
                    test_inputs, test_targets = dataset.test_inputs[:n_examples], dataset.test_targets[:n_examples]
                    test_inputs = np.multiply(test_inputs, 1.0 / 255)
                    test_segmentation = sess.run(network.segmentation_result, feed_dict={
                        network.inputs: np.reshape(test_inputs,
                                                   [n_examples, network.IMAGE_HEIGHT, network.IMAGE_WIDTH, 1])})
                    # Prepare the plot
                    test_plot_buf = draw_results(test_inputs, test_targets, test_segmentation, test_accuracy, network,
                                                 batch_num)
                    # Convert PNG buffer to TF image
                    image = tf.image.decode_png(test_plot_buf.getvalue(), channels=4)
                    # Add the batch dimension
                    image = tf.expand_dims(image, 0)
                    # Add image summary
                    image_summary_op = tf.summary.image("plot", image)
                    image_summary = sess.run(image_summary_op)
                    summary_writer.add_summary(image_summary)
                    # Checkpoint only when the current accuracy ties/beats the best so far.
                    if test_accuracy >= max_acc[0]:
                        checkpoint_path = os.path.join('save', network.description, timestamp, 'model.ckpt')
                        saver.save(sess, checkpoint_path, global_step=batch_num)
# Script entry point: start training when executed directly.
if __name__ == '__main__':
    train()
|
mit
|
kdebrab/pandas
|
pandas/tests/io/test_excel.py
|
2
|
96935
|
# pylint: disable=E1101
import os
import warnings
from datetime import datetime, date, time, timedelta
from distutils.version import LooseVersion
from functools import partial
from warnings import catch_warnings
from collections import OrderedDict
import numpy as np
import pytest
from numpy import nan
import pandas as pd
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import u, range, map, BytesIO, iteritems, PY36
from pandas.core.config import set_option, get_option
from pandas.io.common import URLError
from pandas.io.excel import (
ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _OpenpyxlWriter,
register_writer, _XlsxWriter
)
from pandas.io.formats.excel import ExcelFormatter
from pandas.io.parsers import read_csv
from pandas.util.testing import ensure_clean, makeCustomDataframe as mkdf
# Module-level fixture frames; SharedItems.setup_method hands each test a
# fresh copy so tests can mutate them freely.
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)[:10]
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10]
_tsframe = tm.makeTimeDataFrame()[:5]
# Same numeric frame plus one string column, for mixed-dtype round-trips.
_mixed_frame = _frame.copy()
_mixed_frame['foo'] = 'bar'
@td.skip_if_no('xlrd', '0.9')
class SharedItems(object):
    """Common fixtures and data-loading helpers for the Excel tests."""
    @pytest.fixture(autouse=True)
    def setup_method(self, datapath):
        # Hand each test fresh copies so mutation cannot leak between tests.
        self.dirpath = datapath("io", "data")
        self.frame = _frame.copy()
        self.frame2 = _frame2.copy()
        self.tsframe = _tsframe.copy()
        self.mixed_frame = _mixed_frame.copy()
    def get_csv_refdf(self, basename):
        """
        Obtain the reference data from read_csv with the Python engine.
        Parameters
        ----------
        basename : str
            File base name, excluding file extension.
        Returns
        -------
        dfref : DataFrame
        """
        csv_path = os.path.join(self.dirpath, basename + '.csv')
        return read_csv(csv_path, index_col=0, parse_dates=True, engine='python')
    def get_excelfile(self, basename, ext):
        """
        Return test data ExcelFile instance.
        Parameters
        ----------
        basename : str
            File base name, excluding file extension.
        Returns
        -------
        excel : io.excel.ExcelFile
        """
        full_path = os.path.join(self.dirpath, basename + ext)
        return ExcelFile(full_path)
    def get_exceldf(self, basename, ext, *args, **kwds):
        """
        Return test data DataFrame.
        Parameters
        ----------
        basename : str
            File base name, excluding file extension.
        Returns
        -------
        df : DataFrame
        """
        full_path = os.path.join(self.dirpath, basename + ext)
        return read_excel(full_path, *args, **kwds)
class ReadingTestsBase(SharedItems):
    """Engine-agnostic read_excel tests; subclasses parametrize the file
    extension via the ``ext`` fixture argument."""
    # This is based on ExcelWriterBase
    def test_usecols_int(self, ext):
        # usecols as an int (deprecated parse_cols spelling also checked).
        dfref = self.get_csv_refdf('test1')
        dfref = dfref.reindex(columns=['A', 'B', 'C'])
        df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0, usecols=3)
        df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                               index_col=0, usecols=3)
        with tm.assert_produces_warning(FutureWarning):
            df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                                   index_col=0, parse_cols=3)
        # TODO add index to xls file)
        tm.assert_frame_equal(df1, dfref, check_names=False)
        tm.assert_frame_equal(df2, dfref, check_names=False)
        tm.assert_frame_equal(df3, dfref, check_names=False)
    def test_usecols_list(self, ext):
        # usecols as a list of column positions.
        dfref = self.get_csv_refdf('test1')
        dfref = dfref.reindex(columns=['B', 'C'])
        df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
                               usecols=[0, 2, 3])
        df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                               index_col=0, usecols=[0, 2, 3])
        with tm.assert_produces_warning(FutureWarning):
            df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                                   index_col=0, parse_cols=[0, 2, 3])
        # TODO add index to xls file)
        tm.assert_frame_equal(df1, dfref, check_names=False)
        tm.assert_frame_equal(df2, dfref, check_names=False)
        tm.assert_frame_equal(df3, dfref, check_names=False)
    def test_usecols_str(self, ext):
        # usecols as Excel-style column letters: ranges, lists, and a mix.
        dfref = self.get_csv_refdf('test1')
        df1 = dfref.reindex(columns=['A', 'B', 'C'])
        df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
                               usecols='A:D')
        df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                               index_col=0, usecols='A:D')
        with tm.assert_produces_warning(FutureWarning):
            df4 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                                   index_col=0, parse_cols='A:D')
        # TODO add index to xls, read xls ignores index name ?
        tm.assert_frame_equal(df2, df1, check_names=False)
        tm.assert_frame_equal(df3, df1, check_names=False)
        tm.assert_frame_equal(df4, df1, check_names=False)
        df1 = dfref.reindex(columns=['B', 'C'])
        df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
                               usecols='A,C,D')
        df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                               index_col=0, usecols='A,C,D')
        # TODO add index to xls file
        tm.assert_frame_equal(df2, df1, check_names=False)
        tm.assert_frame_equal(df3, df1, check_names=False)
        df1 = dfref.reindex(columns=['B', 'C'])
        df2 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
                               usecols='A,C:D')
        df3 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                               index_col=0, usecols='A,C:D')
        tm.assert_frame_equal(df2, df1, check_names=False)
        tm.assert_frame_equal(df3, df1, check_names=False)
    def test_excel_stop_iterator(self, ext):
        parsed = self.get_exceldf('test2', ext, 'Sheet1')
        expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1'])
        tm.assert_frame_equal(parsed, expected)
    def test_excel_cell_error_na(self, ext):
        # Excel cell errors should come through as NaN.
        parsed = self.get_exceldf('test3', ext, 'Sheet1')
        expected = DataFrame([[np.nan]], columns=['Test'])
        tm.assert_frame_equal(parsed, expected)
    def test_excel_passes_na(self, ext):
        # keep_default_na toggles whether literal 'NA'/'nan' strings survive.
        excel = self.get_excelfile('test4', ext)
        parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
                            na_values=['apple'])
        expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']],
                             columns=['Test'])
        tm.assert_frame_equal(parsed, expected)
        parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
                            na_values=['apple'])
        expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
                             columns=['Test'])
        tm.assert_frame_equal(parsed, expected)
        # 13967
        excel = self.get_excelfile('test5', ext)
        parsed = read_excel(excel, 'Sheet1', keep_default_na=False,
                            na_values=['apple'])
        expected = DataFrame([['1.#QNAN'], [1], ['nan'], [np.nan], ['rabbit']],
                             columns=['Test'])
        tm.assert_frame_equal(parsed, expected)
        parsed = read_excel(excel, 'Sheet1', keep_default_na=True,
                            na_values=['apple'])
        expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']],
                             columns=['Test'])
        tm.assert_frame_equal(parsed, expected)
    def test_deprecated_sheetname(self, ext):
        # gh-17964
        excel = self.get_excelfile('test1', ext)
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            read_excel(excel, sheetname='Sheet1')
        with pytest.raises(TypeError):
            read_excel(excel, sheet='Sheet1')
    def test_excel_table_sheet_by_index(self, ext):
        # Sheets addressable by integer position, via read_excel and .parse.
        excel = self.get_excelfile('test1', ext)
        dfref = self.get_csv_refdf('test1')
        df1 = read_excel(excel, 0, index_col=0)
        df2 = read_excel(excel, 1, skiprows=[1], index_col=0)
        tm.assert_frame_equal(df1, dfref, check_names=False)
        tm.assert_frame_equal(df2, dfref, check_names=False)
        df1 = excel.parse(0, index_col=0)
        df2 = excel.parse(1, skiprows=[1], index_col=0)
        tm.assert_frame_equal(df1, dfref, check_names=False)
        tm.assert_frame_equal(df2, dfref, check_names=False)
        df3 = read_excel(excel, 0, index_col=0, skipfooter=1)
        tm.assert_frame_equal(df3, df1.iloc[:-1])
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            df4 = read_excel(excel, 0, index_col=0, skip_footer=1)
            tm.assert_frame_equal(df3, df4)
        df3 = excel.parse(0, index_col=0, skipfooter=1)
        tm.assert_frame_equal(df3, df1.iloc[:-1])
        import xlrd
        with pytest.raises(xlrd.XLRDError):
            read_excel(excel, 'asdf')
    def test_excel_table(self, ext):
        dfref = self.get_csv_refdf('test1')
        df1 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0)
        df2 = self.get_exceldf('test1', ext, 'Sheet2', skiprows=[1],
                               index_col=0)
        # TODO add index to file
        tm.assert_frame_equal(df1, dfref, check_names=False)
        tm.assert_frame_equal(df2, dfref, check_names=False)
        df3 = self.get_exceldf('test1', ext, 'Sheet1', index_col=0,
                               skipfooter=1)
        tm.assert_frame_equal(df3, df1.iloc[:-1])
    def test_reader_special_dtypes(self, ext):
        # Mixed-dtype sheet: dtype inference, convert_float, index_col,
        # and converters interactions.
        expected = DataFrame.from_dict(OrderedDict([
            ("IntCol", [1, 2, -3, 4, 0]),
            ("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]),
            ("BoolCol", [True, False, True, True, False]),
            ("StrCol", [1, 2, 3, 4, 5]),
            # GH5394 - this is why convert_float isn't vectorized
            ("Str2Col", ["a", 3, "c", "d", "e"]),
            ("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31),
                         datetime(1905, 1, 1), datetime(2013, 12, 14),
                         datetime(2015, 3, 14)])
        ]))
        basename = 'test_types'
        # should read in correctly and infer types
        actual = self.get_exceldf(basename, ext, 'Sheet1')
        tm.assert_frame_equal(actual, expected)
        # if not coercing number, then int comes in as float
        float_expected = expected.copy()
        float_expected["IntCol"] = float_expected["IntCol"].astype(float)
        float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0
        actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False)
        tm.assert_frame_equal(actual, float_expected)
        # check setting Index (assuming xls and xlsx are the same here)
        for icol, name in enumerate(expected.columns):
            actual = self.get_exceldf(basename, ext, 'Sheet1', index_col=icol)
            exp = expected.set_index(name)
            tm.assert_frame_equal(actual, exp)
        # convert_float and converters should be different but both accepted
        expected["StrCol"] = expected["StrCol"].apply(str)
        actual = self.get_exceldf(
            basename, ext, 'Sheet1', converters={"StrCol": str})
        tm.assert_frame_equal(actual, expected)
        no_convert_float = float_expected.copy()
        no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str)
        actual = self.get_exceldf(basename, ext, 'Sheet1', convert_float=False,
                                  converters={"StrCol": str})
        tm.assert_frame_equal(actual, no_convert_float)
    # GH8212 - support for converters and missing values
    def test_reader_converters(self, ext):
        basename = 'test_converters'
        expected = DataFrame.from_dict(OrderedDict([
            ("IntCol", [1, 2, -3, -1000, 0]),
            ("FloatCol", [12.5, np.nan, 18.3, 19.2, 0.000000005]),
            ("BoolCol", ['Found', 'Found', 'Found', 'Not found', 'Found']),
            ("StrCol", ['1', np.nan, '3', '4', '5']),
        ]))
        # converters keyed both by column name and by position.
        converters = {'IntCol': lambda x: int(x) if x != '' else -1000,
                      'FloatCol': lambda x: 10 * x if x else np.nan,
                      2: lambda x: 'Found' if x != '' else 'Not found',
                      3: lambda x: str(x) if x else '',
                      }
        # should read in correctly and set types of single cells (not array
        # dtypes)
        actual = self.get_exceldf(basename, ext, 'Sheet1',
                                  converters=converters)
        tm.assert_frame_equal(actual, expected)
    def test_reader_dtype(self, ext):
        # GH 8212
        basename = 'testdtype'
        actual = self.get_exceldf(basename, ext)
        expected = DataFrame({
            'a': [1, 2, 3, 4],
            'b': [2.5, 3.5, 4.5, 5.5],
            'c': [1, 2, 3, 4],
            'd': [1.0, 2.0, np.nan, 4.0]}).reindex(
                columns=['a', 'b', 'c', 'd'])
        tm.assert_frame_equal(actual, expected)
        actual = self.get_exceldf(basename, ext,
                                  dtype={'a': 'float64',
                                         'b': 'float32',
                                         'c': str})
        expected['a'] = expected['a'].astype('float64')
        expected['b'] = expected['b'].astype('float32')
        expected['c'] = ['001', '002', '003', '004']
        tm.assert_frame_equal(actual, expected)
        # int64 can't hold NaN -> must raise.
        with pytest.raises(ValueError):
            actual = self.get_exceldf(basename, ext, dtype={'d': 'int64'})
    def test_reading_all_sheets(self, ext):
        # Test reading all sheetnames by setting sheetname to None,
        # Ensure a dict is returned.
        # See PR #9450
        basename = 'test_multisheet'
        dfs = self.get_exceldf(basename, ext, sheet_name=None)
        # ensure this is not alphabetical to test order preservation
        expected_keys = ['Charlie', 'Alpha', 'Beta']
        tm.assert_contains_all(expected_keys, dfs.keys())
        # Issue 9930
        # Ensure sheet order is preserved
        assert expected_keys == list(dfs.keys())
    def test_reading_multiple_specific_sheets(self, ext):
        # Test reading specific sheetnames by specifying a mixed list
        # of integers and strings, and confirm that duplicated sheet
        # references (positions/names) are removed properly.
        # Ensure a dict is returned
        # See PR #9450
        basename = 'test_multisheet'
        # Explicitly request duplicates. Only the set should be returned.
        expected_keys = [2, 'Charlie', 'Charlie']
        dfs = self.get_exceldf(basename, ext, sheet_name=expected_keys)
        expected_keys = list(set(expected_keys))
        tm.assert_contains_all(expected_keys, dfs.keys())
        assert len(expected_keys) == len(dfs.keys())
    def test_reading_all_sheets_with_blank(self, ext):
        # Test reading all sheetnames by setting sheetname to None,
        # In the case where some sheets are blank.
        # Issue #11711
        basename = 'blank_with_header'
        dfs = self.get_exceldf(basename, ext, sheet_name=None)
        expected_keys = ['Sheet1', 'Sheet2', 'Sheet3']
        tm.assert_contains_all(expected_keys, dfs.keys())
    # GH6403
    def test_read_excel_blank(self, ext):
        actual = self.get_exceldf('blank', ext, 'Sheet1')
        tm.assert_frame_equal(actual, DataFrame())
    def test_read_excel_blank_with_header(self, ext):
        expected = DataFrame(columns=['col_1', 'col_2'])
        actual = self.get_exceldf('blank_with_header', ext, 'Sheet1')
        tm.assert_frame_equal(actual, expected)
    @td.skip_if_no('openpyxl')
    @td.skip_if_no('xlwt')
    # GH 12292 : error when read one empty column from excel file
    def test_read_one_empty_col_no_header(self, ext):
        df = pd.DataFrame(
            [["", 1, 100],
             ["", 2, 200],
             ["", 3, 300],
             ["", 4, 400]]
        )
        with ensure_clean(ext) as path:
            df.to_excel(path, 'no_header', index=False, header=False)
            actual_header_none = read_excel(
                path,
                'no_header',
                usecols=[0],
                header=None
            )
            actual_header_zero = read_excel(
                path,
                'no_header',
                usecols=[0],
                header=0
            )
        expected = DataFrame()
        tm.assert_frame_equal(actual_header_none, expected)
        tm.assert_frame_equal(actual_header_zero, expected)
    @td.skip_if_no('openpyxl')
    @td.skip_if_no('xlwt')
    def test_read_one_empty_col_with_header(self, ext):
        df = pd.DataFrame(
            [["", 1, 100],
             ["", 2, 200],
             ["", 3, 300],
             ["", 4, 400]]
        )
        with ensure_clean(ext) as path:
            df.to_excel(path, 'with_header', index=False, header=True)
            actual_header_none = read_excel(
                path,
                'with_header',
                usecols=[0],
                header=None
            )
            actual_header_zero = read_excel(
                path,
                'with_header',
                usecols=[0],
                header=0
            )
        expected_header_none = DataFrame(pd.Series([0], dtype='int64'))
        tm.assert_frame_equal(actual_header_none, expected_header_none)
        expected_header_zero = DataFrame(columns=[0])
        tm.assert_frame_equal(actual_header_zero, expected_header_zero)
    @td.skip_if_no('openpyxl')
    @td.skip_if_no('xlwt')
    def test_set_column_names_in_parameter(self, ext):
        # GH 12870 : pass down column names associated with
        # keyword argument names
        refdf = pd.DataFrame([[1, 'foo'], [2, 'bar'],
                              [3, 'baz']], columns=['a', 'b'])
        with ensure_clean(ext) as pth:
            with ExcelWriter(pth) as writer:
                refdf.to_excel(writer, 'Data_no_head',
                               header=False, index=False)
                refdf.to_excel(writer, 'Data_with_head', index=False)
            refdf.columns = ['A', 'B']
            with ExcelFile(pth) as reader:
                xlsdf_no_head = read_excel(reader, 'Data_no_head',
                                           header=None, names=['A', 'B'])
                xlsdf_with_head = read_excel(reader, 'Data_with_head',
                                             index_col=None, names=['A', 'B'])
            tm.assert_frame_equal(xlsdf_no_head, refdf)
            tm.assert_frame_equal(xlsdf_with_head, refdf)
    def test_date_conversion_overflow(self, ext):
        # GH 10001 : pandas.ExcelFile ignore parse_dates=False
        expected = pd.DataFrame([[pd.Timestamp('2016-03-12'), 'Marc Johnson'],
                                 [pd.Timestamp('2016-03-16'), 'Jack Black'],
                                 [1e+20, 'Timothy Brown']],
                                columns=['DateColWithBigInt', 'StringCol'])
        result = self.get_exceldf('testdateoverflow', ext)
        tm.assert_frame_equal(result, expected)
    def test_sheet_name_and_sheetname(self, ext):
        # GH10559: Minor improvement: Change "sheet_name" to "sheetname"
        # GH10969: DOC: Consistent var names (sheetname vs sheet_name)
        # GH12604: CLN GH10559 Rename sheetname variable to sheet_name
        # GH20920: ExcelFile.parse() and pd.read_xlsx() have different
        # behavior for "sheetname" argument
        dfref = self.get_csv_refdf('test1')
        df1 = self.get_exceldf('test1', ext,
                               sheet_name='Sheet1')  # doc
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            df2 = self.get_exceldf('test1', ext,
                                   sheetname='Sheet1')  # bkwrd compat
        excel = self.get_excelfile('test1', ext)
        df1_parse = excel.parse(sheet_name='Sheet1')  # doc
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            df2_parse = excel.parse(sheetname='Sheet1')  # bkwrd compat
        tm.assert_frame_equal(df1, dfref, check_names=False)
        tm.assert_frame_equal(df2, dfref, check_names=False)
        tm.assert_frame_equal(df1_parse, dfref, check_names=False)
        tm.assert_frame_equal(df2_parse, dfref, check_names=False)
    def test_sheet_name_both_raises(self, ext):
        # Passing both spellings at once must be rejected.
        with tm.assert_raises_regex(TypeError, "Cannot specify both"):
            self.get_exceldf('test1', ext, sheetname='Sheet1',
                             sheet_name='Sheet1')
        excel = self.get_excelfile('test1', ext)
        with tm.assert_raises_regex(TypeError, "Cannot specify both"):
            excel.parse(sheetname='Sheet1',
                        sheet_name='Sheet1')
@pytest.mark.parametrize("ext", ['.xls', '.xlsx', '.xlsm'])
class TestXlrdReader(ReadingTestsBase):
"""
This is the base class for the xlrd tests, and 3 different file formats
are supported: xls, xlsx, xlsm
"""
def test_excel_read_buffer(self, ext):
pth = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(pth, 'Sheet1', index_col=0)
with open(pth, 'rb') as f:
actual = read_excel(f, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
with open(pth, 'rb') as f:
xls = ExcelFile(f)
actual = read_excel(xls, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
@td.skip_if_no('xlwt')
def test_read_xlrd_Book(self, ext):
import xlrd
df = self.frame
with ensure_clean('.xls') as pth:
df.to_excel(pth, "SheetA")
book = xlrd.open_workbook(pth)
with ExcelFile(book, engine="xlrd") as xl:
result = read_excel(xl, "SheetA")
tm.assert_frame_equal(df, result)
result = read_excel(book, sheet_name="SheetA", engine="xlrd")
tm.assert_frame_equal(df, result)
@tm.network
def test_read_from_http_url(self, ext):
url = ('https://raw.github.com/pandas-dev/pandas/master/'
'pandas/tests/io/data/test1' + ext)
url_table = read_excel(url)
local_table = self.get_exceldf('test1', ext)
tm.assert_frame_equal(url_table, local_table)
@td.skip_if_no('s3fs')
def test_read_from_s3_url(self, ext):
boto3 = pytest.importorskip('boto3')
moto = pytest.importorskip('moto')
with moto.mock_s3():
conn = boto3.resource("s3", region_name="us-east-1")
conn.create_bucket(Bucket="pandas-test")
file_name = os.path.join(self.dirpath, 'test1' + ext)
with open(file_name, 'rb') as f:
conn.Bucket("pandas-test").put_object(Key="test1" + ext,
Body=f)
url = ('s3://pandas-test/test1' + ext)
url_table = read_excel(url)
local_table = self.get_exceldf('test1', ext)
tm.assert_frame_equal(url_table, local_table)
@pytest.mark.slow
def test_read_from_file_url(self, ext):
# FILE
localtable = os.path.join(self.dirpath, 'test1' + ext)
local_table = read_excel(localtable)
try:
url_table = read_excel('file://localhost/' + localtable)
except URLError:
# fails on some systems
import platform
pytest.skip("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
@td.skip_if_no('pathlib')
def test_read_from_pathlib_path(self, ext):
# GH12655
from pathlib import Path
str_path = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(str_path, 'Sheet1', index_col=0)
path_obj = Path(self.dirpath, 'test1' + ext)
actual = read_excel(path_obj, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
@td.skip_if_no('py.path')
def test_read_from_py_localpath(self, ext):
# GH12655
from py.path import local as LocalPath
str_path = os.path.join(self.dirpath, 'test1' + ext)
expected = read_excel(str_path, 'Sheet1', index_col=0)
abs_dir = os.path.abspath(self.dirpath)
path_obj = LocalPath(abs_dir).join('test1' + ext)
actual = read_excel(path_obj, 'Sheet1', index_col=0)
tm.assert_frame_equal(expected, actual)
def test_reader_closes_file(self, ext):
pth = os.path.join(self.dirpath, 'test1' + ext)
f = open(pth, 'rb')
with ExcelFile(f) as xlsx:
# parses okay
read_excel(xlsx, 'Sheet1', index_col=0)
assert f.closed
@td.skip_if_no('openpyxl')
@td.skip_if_no('xlwt')
def test_creating_and_reading_multiple_sheets(self, ext):
# Test reading multiple sheets, from a runtime created excel file
# with multiple sheets.
# See PR #9450
def tdf(sheetname):
d, i = [11, 22, 33], [1, 2, 3]
return DataFrame(d, i, columns=[sheetname])
sheets = ['AAA', 'BBB', 'CCC']
dfs = [tdf(s) for s in sheets]
dfs = dict(zip(sheets, dfs))
with ensure_clean(ext) as pth:
with ExcelWriter(pth) as ew:
for sheetname, df in iteritems(dfs):
df.to_excel(ew, sheetname)
dfs_returned = read_excel(pth, sheet_name=sheets)
for s in sheets:
tm.assert_frame_equal(dfs[s], dfs_returned[s])
def test_reader_seconds(self, ext):
import xlrd
# Test reading times with and without milliseconds. GH5945.
if LooseVersion(xlrd.__VERSION__) >= LooseVersion("0.9.3"):
# Xlrd >= 0.9.3 can handle Excel milliseconds.
expected = DataFrame.from_dict({"Time": [time(1, 2, 3),
time(2, 45, 56, 100000),
time(4, 29, 49, 200000),
time(6, 13, 42, 300000),
time(7, 57, 35, 400000),
time(9, 41, 28, 500000),
time(11, 25, 21, 600000),
time(13, 9, 14, 700000),
time(14, 53, 7, 800000),
time(16, 37, 0, 900000),
time(18, 20, 54)]})
else:
# Xlrd < 0.9.3 rounds Excel milliseconds.
expected = DataFrame.from_dict({"Time": [time(1, 2, 3),
time(2, 45, 56),
time(4, 29, 49),
time(6, 13, 42),
time(7, 57, 35),
time(9, 41, 29),
time(11, 25, 22),
time(13, 9, 15),
time(14, 53, 8),
time(16, 37, 1),
time(18, 20, 54)]})
actual = self.get_exceldf('times_1900', ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
actual = self.get_exceldf('times_1904', ext, 'Sheet1')
tm.assert_frame_equal(actual, expected)
def test_read_excel_multiindex(self, ext):
# GH 4679
mi = MultiIndex.from_product([['foo', 'bar'], ['a', 'b']])
mi_file = os.path.join(self.dirpath, 'testmultiindex' + ext)
expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
[2, 3.5, pd.Timestamp('2015-01-02'), False],
[3, 4.5, pd.Timestamp('2015-01-03'), False],
[4, 5.5, pd.Timestamp('2015-01-04'), True]],
columns=mi)
actual = read_excel(mi_file, 'mi_column', header=[0, 1])
tm.assert_frame_equal(actual, expected)
actual = read_excel(mi_file, 'mi_column', header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
expected.columns = ['a', 'b', 'c', 'd']
expected.index = mi
actual = read_excel(mi_file, 'mi_index', index_col=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
expected.columns = mi
actual = read_excel(mi_file, 'both', index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected, check_names=False)
expected.index = mi.set_names(['ilvl1', 'ilvl2'])
expected.columns = ['a', 'b', 'c', 'd']
actual = read_excel(mi_file, 'mi_index_name', index_col=[0, 1])
tm.assert_frame_equal(actual, expected)
expected.index = list(range(4))
expected.columns = mi.set_names(['c1', 'c2'])
actual = read_excel(mi_file, 'mi_column_name',
header=[0, 1], index_col=0)
tm.assert_frame_equal(actual, expected)
# Issue #11317
expected.columns = mi.set_levels(
[1, 2], level=1).set_names(['c1', 'c2'])
actual = read_excel(mi_file, 'name_with_int',
index_col=0, header=[0, 1])
tm.assert_frame_equal(actual, expected)
expected.columns = mi.set_names(['c1', 'c2'])
expected.index = mi.set_names(['ilvl1', 'ilvl2'])
actual = read_excel(mi_file, 'both_name',
index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected)
actual = read_excel(mi_file, 'both_name',
index_col=[0, 1], header=[0, 1])
tm.assert_frame_equal(actual, expected)
actual = read_excel(mi_file, 'both_name_skiprows', index_col=[0, 1],
header=[0, 1], skiprows=2)
tm.assert_frame_equal(actual, expected)
@td.skip_if_no('xlsxwriter')
def test_read_excel_multiindex_empty_level(self, ext):
# GH 12453
with ensure_clean('.xlsx') as path:
df = DataFrame({
('One', 'x'): {0: 1},
('Two', 'X'): {0: 3},
('Two', 'Y'): {0: 7},
('Zero', ''): {0: 0}
})
expected = DataFrame({
('One', u'x'): {0: 1},
('Two', u'X'): {0: 3},
('Two', u'Y'): {0: 7},
('Zero', 'Unnamed: 3_level_1'): {0: 0}
})
df.to_excel(path)
actual = pd.read_excel(path, header=[0, 1])
tm.assert_frame_equal(actual, expected)
df = pd.DataFrame({
('Beg', ''): {0: 0},
('Middle', 'x'): {0: 1},
('Tail', 'X'): {0: 3},
('Tail', 'Y'): {0: 7}
})
expected = pd.DataFrame({
('Beg', 'Unnamed: 0_level_1'): {0: 0},
('Middle', u'x'): {0: 1},
('Tail', u'X'): {0: 3},
('Tail', u'Y'): {0: 7}
})
df.to_excel(path)
actual = pd.read_excel(path, header=[0, 1])
tm.assert_frame_equal(actual, expected)
    @td.skip_if_no('xlsxwriter')
    def test_excel_multindex_roundtrip(self, ext):
        """Round-trip frames over every combination of named/unnamed and
        1-level/3-level row and column indexes (GH 4679)."""
        # GH 4679
        with ensure_clean('.xlsx') as pth:
            for c_idx_names in [True, False]:
                for r_idx_names in [True, False]:
                    for c_idx_levels in [1, 3]:
                        for r_idx_levels in [1, 3]:
                            # column index name can't be serialized unless
                            # MultiIndex
                            if (c_idx_levels == 1 and c_idx_names):
                                continue
                            # empty name case current read in as unnamed
                            # levels, not Nones
                            check_names = True
                            if not r_idx_names and r_idx_levels > 1:
                                check_names = False
                            df = mkdf(5, 5, c_idx_names,
                                      r_idx_names, c_idx_levels,
                                      r_idx_levels)
                            df.to_excel(pth)
                            act = pd.read_excel(
                                pth, index_col=list(range(r_idx_levels)),
                                header=list(range(c_idx_levels)))
                            tm.assert_frame_equal(
                                df, act, check_names=check_names)
                            # Re-check with an all-NaN *first* row ...
                            df.iloc[0, :] = np.nan
                            df.to_excel(pth)
                            act = pd.read_excel(
                                pth, index_col=list(range(r_idx_levels)),
                                header=list(range(c_idx_levels)))
                            tm.assert_frame_equal(
                                df, act, check_names=check_names)
                            # ... and additionally an all-NaN *last* row.
                            df.iloc[-1, :] = np.nan
                            df.to_excel(pth)
                            act = pd.read_excel(
                                pth, index_col=list(range(r_idx_levels)),
                                header=list(range(c_idx_levels)))
                            tm.assert_frame_equal(
                                df, act, check_names=check_names)
    def test_excel_old_index_format(self, ext):
        """Files written by pre-0.17 pandas still read sensibly (GH 4679)."""
        # see gh-4679
        filename = 'test_index_name_pre17' + ext
        in_file = os.path.join(self.dirpath, filename)
        # We detect headers to determine if index names exist, so
        # that "index" name in the "names" version of the data will
        # now be interpreted as rows that include null data.
        data = np.array([[None, None, None, None, None],
                         ['R0C0', 'R0C1', 'R0C2', 'R0C3', 'R0C4'],
                         ['R1C0', 'R1C1', 'R1C2', 'R1C3', 'R1C4'],
                         ['R2C0', 'R2C1', 'R2C2', 'R2C3', 'R2C4'],
                         ['R3C0', 'R3C1', 'R3C2', 'R3C3', 'R3C4'],
                         ['R4C0', 'R4C1', 'R4C2', 'R4C3', 'R4C4']])
        columns = ['C_l0_g0', 'C_l0_g1', 'C_l0_g2', 'C_l0_g3', 'C_l0_g4']
        # Expected indexes for the "names" sheets: the old index-name row is
        # absorbed as an extra index entry ('R0' / ('R0', 'R1')).
        mi = MultiIndex(levels=[['R0', 'R_l0_g0', 'R_l0_g1',
                                 'R_l0_g2', 'R_l0_g3', 'R_l0_g4'],
                                ['R1', 'R_l1_g0', 'R_l1_g1',
                                 'R_l1_g2', 'R_l1_g3', 'R_l1_g4']],
                        labels=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]],
                        names=[None, None])
        si = Index(['R0', 'R_l0_g0', 'R_l0_g1', 'R_l0_g2',
                    'R_l0_g3', 'R_l0_g4'], name=None)
        expected = pd.DataFrame(data, index=si, columns=columns)
        actual = pd.read_excel(in_file, 'single_names')
        tm.assert_frame_equal(actual, expected)
        expected.index = mi
        actual = pd.read_excel(in_file, 'multi_names')
        tm.assert_frame_equal(actual, expected)
        # The analogous versions of the "names" version data
        # where there are explicitly no names for the indices.
        data = np.array([['R0C0', 'R0C1', 'R0C2', 'R0C3', 'R0C4'],
                         ['R1C0', 'R1C1', 'R1C2', 'R1C3', 'R1C4'],
                         ['R2C0', 'R2C1', 'R2C2', 'R2C3', 'R2C4'],
                         ['R3C0', 'R3C1', 'R3C2', 'R3C3', 'R3C4'],
                         ['R4C0', 'R4C1', 'R4C2', 'R4C3', 'R4C4']])
        columns = ['C_l0_g0', 'C_l0_g1', 'C_l0_g2', 'C_l0_g3', 'C_l0_g4']
        mi = MultiIndex(levels=[['R_l0_g0', 'R_l0_g1', 'R_l0_g2',
                                 'R_l0_g3', 'R_l0_g4'],
                                ['R_l1_g0', 'R_l1_g1', 'R_l1_g2',
                                 'R_l1_g3', 'R_l1_g4']],
                        labels=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
                        names=[None, None])
        si = Index(['R_l0_g0', 'R_l0_g1', 'R_l0_g2',
                    'R_l0_g3', 'R_l0_g4'], name=None)
        expected = pd.DataFrame(data, index=si, columns=columns)
        actual = pd.read_excel(in_file, 'single_no_names')
        tm.assert_frame_equal(actual, expected)
        expected.index = mi
        actual = pd.read_excel(in_file, 'multi_no_names', index_col=[0, 1])
        tm.assert_frame_equal(actual, expected, check_names=False)
def test_read_excel_bool_header_arg(self, ext):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
header=arg)
def test_read_excel_chunksize(self, ext):
# GH 8011
with pytest.raises(NotImplementedError):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
chunksize=100)
    @td.skip_if_no('openpyxl')
    @td.skip_if_no('xlwt')
    def test_read_excel_parse_dates(self, ext):
        """``parse_dates`` / ``date_parser`` behave like read_csv's
        equivalents (GH 11544, GH 12051)."""
        # GH 11544, 12051
        df = DataFrame(
            {'col': [1, 2, 3],
             'date_strings': pd.date_range('2012-01-01', periods=3)})
        # df2 holds the same dates rendered as MM/DD/YYYY strings.
        df2 = df.copy()
        df2['date_strings'] = df2['date_strings'].dt.strftime('%m/%d/%Y')
        with ensure_clean(ext) as pth:
            df2.to_excel(pth)
            # Without parse_dates the strings stay strings.
            res = read_excel(pth)
            tm.assert_frame_equal(df2, res)
            # no index_col specified when parse_dates is True
            with tm.assert_produces_warning():
                res = read_excel(pth, parse_dates=True)
                tm.assert_frame_equal(df2, res)
            # Naming the column parses it back to datetimes.
            res = read_excel(pth, parse_dates=['date_strings'], index_col=0)
            tm.assert_frame_equal(df, res)
            # A custom date_parser is honored as well.
            dateparser = lambda x: pd.datetime.strptime(x, '%m/%d/%Y')
            res = read_excel(pth, parse_dates=['date_strings'],
                             date_parser=dateparser, index_col=0)
            tm.assert_frame_equal(df, res)
def test_read_excel_skiprows_list(self, ext):
# GH 4903
actual = pd.read_excel(os.path.join(self.dirpath,
'testskiprows' + ext),
'skiprows_list', skiprows=[0, 2])
expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
[2, 3.5, pd.Timestamp('2015-01-02'), False],
[3, 4.5, pd.Timestamp('2015-01-03'), False],
[4, 5.5, pd.Timestamp('2015-01-04'), True]],
columns=['a', 'b', 'c', 'd'])
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(os.path.join(self.dirpath,
'testskiprows' + ext),
'skiprows_list', skiprows=np.array([0, 2]))
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows(self, ext):
# GH 16645
num_rows_to_pull = 5
actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows=num_rows_to_pull)
expected = pd.read_excel(os.path.join(self.dirpath,
'test1' + ext))
expected = expected[:num_rows_to_pull]
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_greater_than_nrows_in_file(self, ext):
# GH 16645
expected = pd.read_excel(os.path.join(self.dirpath,
'test1' + ext))
num_records_in_file = len(expected)
num_rows_to_pull = num_records_in_file + 10
actual = pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows=num_rows_to_pull)
tm.assert_frame_equal(actual, expected)
def test_read_excel_nrows_non_integer_parameter(self, ext):
# GH 16645
msg = "'nrows' must be an integer >=0"
with tm.assert_raises_regex(ValueError, msg):
pd.read_excel(os.path.join(self.dirpath, 'test1' + ext),
nrows='5')
def test_read_excel_squeeze(self, ext):
# GH 12157
f = os.path.join(self.dirpath, 'test_squeeze' + ext)
actual = pd.read_excel(f, 'two_columns', index_col=0, squeeze=True)
expected = pd.Series([2, 3, 4], [4, 5, 6], name='b')
expected.index.name = 'a'
tm.assert_series_equal(actual, expected)
actual = pd.read_excel(f, 'two_columns', squeeze=True)
expected = pd.DataFrame({'a': [4, 5, 6],
'b': [2, 3, 4]})
tm.assert_frame_equal(actual, expected)
actual = pd.read_excel(f, 'one_column', squeeze=True)
expected = pd.Series([1, 2, 3], name='a')
tm.assert_series_equal(actual, expected)
class _WriterBase(SharedItems):
    """Shared fixture plumbing for the Excel writer test classes."""
    @pytest.fixture(autouse=True)
    def set_engine_and_path(self, request, merge_cells, engine, ext):
        """Fixture to set engine and open file for use in each test case
        Rather than requiring `engine=...` to be provided explicitly as an
        argument in each test, this fixture sets a global option to dictate
        which engine should be used to write Excel files. After executing
        the test it rolls back said change to the global option.
        It also uses a context manager to open a temporary excel file for
        the function to write to, accessible via `self.path`
        Notes
        -----
        This fixture will run as part of each test method defined in the
        class and any subclasses, on account of the `autouse=True`
        argument
        """
        # e.g. 'io.excel.xlsx.writer' for ext == '.xlsx'
        option_name = 'io.excel.{ext}.writer'.format(ext=ext.strip('.'))
        prev_engine = get_option(option_name)
        set_option(option_name, engine)
        with ensure_clean(ext) as path:
            self.path = path
            # Run the test inside the temp-file context.
            yield
        set_option(option_name, prev_engine)  # Roll back option change
@pytest.mark.parametrize("merge_cells", [True, False])
@pytest.mark.parametrize("engine,ext", [
pytest.param('openpyxl', '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('openpyxl'), reason='No openpyxl')),
pytest.param('openpyxl', '.xlsm', marks=pytest.mark.skipif(
not td.safe_import('openpyxl'), reason='No openpyxl')),
pytest.param('xlwt', '.xls', marks=pytest.mark.skipif(
not td.safe_import('xlwt'), reason='No xlwt')),
pytest.param('xlsxwriter', '.xlsx', marks=pytest.mark.skipif(
not td.safe_import('xlsxwriter'), reason='No xlsxwriter'))
])
class TestExcelWriter(_WriterBase):
# Base class for test cases to run with different Excel writers.
def test_excel_sheet_by_name_raise(self, merge_cells, engine, ext):
import xlrd
gt = DataFrame(np.random.randn(10, 2))
gt.to_excel(self.path)
xl = ExcelFile(self.path)
df = read_excel(xl, 0)
tm.assert_frame_equal(gt, df)
with pytest.raises(xlrd.XLRDError):
read_excel(xl, '0')
def test_excelwriter_contextmanager(self, merge_cells, engine, ext):
with ExcelWriter(self.path) as writer:
self.frame.to_excel(writer, 'Data1')
self.frame2.to_excel(writer, 'Data2')
with ExcelFile(self.path) as reader:
found_df = read_excel(reader, 'Data1')
found_df2 = read_excel(reader, 'Data2')
tm.assert_frame_equal(found_df, self.frame)
tm.assert_frame_equal(found_df2, self.frame2)
    def test_roundtrip(self, merge_cells, engine, ext):
        """Basic write/read round trips, NA representations, and the
        Series.to_excel entry point (GH 3611, GH 6573, GH 8825)."""
        self.frame['A'][:5] = nan
        # Smoke-test assorted writer options on the same path first.
        self.frame.to_excel(self.path, 'test1')
        self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
        self.frame.to_excel(self.path, 'test1', header=False)
        self.frame.to_excel(self.path, 'test1', index=False)
        # test roundtrip
        self.frame.to_excel(self.path, 'test1')
        recons = read_excel(self.path, 'test1', index_col=0)
        tm.assert_frame_equal(self.frame, recons)
        # Without the index written, restore it before comparing.
        self.frame.to_excel(self.path, 'test1', index=False)
        recons = read_excel(self.path, 'test1', index_col=None)
        recons.index = self.frame.index
        tm.assert_frame_equal(self.frame, recons)
        # na_rep strings must convert back to NaN via na_values.
        self.frame.to_excel(self.path, 'test1', na_rep='NA')
        recons = read_excel(self.path, 'test1', index_col=0, na_values=['NA'])
        tm.assert_frame_equal(self.frame, recons)
        # GH 3611
        self.frame.to_excel(self.path, 'test1', na_rep='88')
        recons = read_excel(self.path, 'test1', index_col=0, na_values=['88'])
        tm.assert_frame_equal(self.frame, recons)
        # Numeric na_values should match the string na_rep too.
        self.frame.to_excel(self.path, 'test1', na_rep='88')
        recons = read_excel(self.path, 'test1', index_col=0,
                            na_values=[88, 88.0])
        tm.assert_frame_equal(self.frame, recons)
        # GH 6573: default sheet selection when none is given on read.
        self.frame.to_excel(self.path, 'Sheet1')
        recons = read_excel(self.path, index_col=0)
        tm.assert_frame_equal(self.frame, recons)
        self.frame.to_excel(self.path, '0')
        recons = read_excel(self.path, index_col=0)
        tm.assert_frame_equal(self.frame, recons)
        # GH 8825 Pandas Series should provide to_excel method
        s = self.frame["A"]
        s.to_excel(self.path)
        recons = read_excel(self.path, index_col=0)
        tm.assert_frame_equal(s.to_frame(), recons)
def test_mixed(self, merge_cells, engine, ext):
self.mixed_frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=0)
tm.assert_frame_equal(self.mixed_frame, recons)
def test_tsframe(self, merge_cells, engine, ext):
df = tm.makeTimeDataFrame()[:5]
df.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
tm.assert_frame_equal(df, recons)
def test_basics_with_nan(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
@pytest.mark.parametrize("np_type", [
np.int8, np.int16, np.int32, np.int64])
def test_int_types(self, merge_cells, engine, ext, np_type):
# Test np.int values read come back as int (rather than float
# which is Excel's format).
frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)),
dtype=np_type)
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
int_frame = frame.astype(np.int64)
tm.assert_frame_equal(int_frame, recons)
recons2 = read_excel(self.path, 'test1')
tm.assert_frame_equal(int_frame, recons2)
# test with convert_float=False comes back as float
float_frame = frame.astype(float)
recons = read_excel(self.path, 'test1', convert_float=False)
tm.assert_frame_equal(recons, float_frame,
check_index_type=False,
check_column_type=False)
@pytest.mark.parametrize("np_type", [
np.float16, np.float32, np.float64])
def test_float_types(self, merge_cells, engine, ext, np_type):
# Test np.float values read come back as float.
frame = DataFrame(np.random.random_sample(10), dtype=np_type)
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1').astype(np_type)
tm.assert_frame_equal(frame, recons, check_dtype=False)
@pytest.mark.parametrize("np_type", [np.bool8, np.bool_])
def test_bool_types(self, merge_cells, engine, ext, np_type):
# Test np.bool values read come back as float.
frame = (DataFrame([1, 0, True, False], dtype=np_type))
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1').astype(np_type)
tm.assert_frame_equal(frame, recons)
def test_inf_roundtrip(self, merge_cells, engine, ext):
frame = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)])
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
tm.assert_frame_equal(frame, recons)
def test_sheets(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# Test writing to separate sheets
writer = ExcelWriter(self.path)
self.frame.to_excel(writer, 'test1')
self.tsframe.to_excel(writer, 'test2')
writer.save()
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1', index_col=0)
tm.assert_frame_equal(self.frame, recons)
recons = read_excel(reader, 'test2', index_col=0)
tm.assert_frame_equal(self.tsframe, recons)
assert 2 == len(reader.sheet_names)
assert 'test1' == reader.sheet_names[0]
assert 'test2' == reader.sheet_names[1]
def test_colaliases(self, merge_cells, engine, ext):
self.frame['A'][:5] = nan
self.frame.to_excel(self.path, 'test1')
self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
self.frame.to_excel(self.path, 'test1', header=False)
self.frame.to_excel(self.path, 'test1', index=False)
# column aliases
col_aliases = Index(['AA', 'X', 'Y', 'Z'])
self.frame2.to_excel(self.path, 'test1', header=col_aliases)
reader = ExcelFile(self.path)
rs = read_excel(reader, 'test1', index_col=0)
xp = self.frame2.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
    def test_roundtrip_indexlabels(self, merge_cells, engine, ext):
        """``index_label`` variants (list, over-long list, scalar) and
        multi-column index round trips."""
        self.frame['A'][:5] = nan
        # Smoke-test assorted writer options on the same path first.
        self.frame.to_excel(self.path, 'test1')
        self.frame.to_excel(self.path, 'test1', columns=['A', 'B'])
        self.frame.to_excel(self.path, 'test1', header=False)
        self.frame.to_excel(self.path, 'test1', index=False)
        # test index_label
        frame = (DataFrame(np.random.randn(10, 2)) >= 0)
        frame.to_excel(self.path, 'test1',
                       index_label=['test'],
                       merge_cells=merge_cells)
        reader = ExcelFile(self.path)
        recons = read_excel(reader, 'test1',
                            index_col=0,
                            ).astype(np.int64)
        frame.index.names = ['test']
        assert frame.index.names == recons.index.names
        # Extra labels beyond the number of index levels are ignored.
        frame = (DataFrame(np.random.randn(10, 2)) >= 0)
        frame.to_excel(self.path,
                       'test1',
                       index_label=['test', 'dummy', 'dummy2'],
                       merge_cells=merge_cells)
        reader = ExcelFile(self.path)
        recons = read_excel(reader, 'test1',
                            index_col=0,
                            ).astype(np.int64)
        frame.index.names = ['test']
        assert frame.index.names == recons.index.names
        # A scalar index_label works too; bools round-trip via int64.
        frame = (DataFrame(np.random.randn(10, 2)) >= 0)
        frame.to_excel(self.path,
                       'test1',
                       index_label='test',
                       merge_cells=merge_cells)
        reader = ExcelFile(self.path)
        recons = read_excel(reader, 'test1',
                            index_col=0,
                            ).astype(np.int64)
        frame.index.names = ['test']
        tm.assert_frame_equal(frame, recons.astype(bool))
        self.frame.to_excel(self.path,
                            'test1',
                            columns=['A', 'B', 'C', 'D'],
                            index=False, merge_cells=merge_cells)
        # take 'A' and 'B' as indexes (same row as cols 'C', 'D')
        df = self.frame.copy()
        df = df.set_index(['A', 'B'])
        reader = ExcelFile(self.path)
        recons = read_excel(reader, 'test1', index_col=[0, 1])
        tm.assert_frame_equal(df, recons, check_less_precise=True)
def test_excel_roundtrip_indexname(self, merge_cells, engine, ext):
df = DataFrame(np.random.randn(10, 4))
df.index.name = 'foo'
df.to_excel(self.path, merge_cells=merge_cells)
xf = ExcelFile(self.path)
result = read_excel(xf, xf.sheet_names[0],
index_col=0)
tm.assert_frame_equal(result, df)
assert result.index.name == 'foo'
def test_excel_roundtrip_datetime(self, merge_cells, engine, ext):
# datetime.date, not sure what to test here exactly
tsf = self.tsframe.copy()
tsf.index = [x.date() for x in self.tsframe.index]
tsf.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
tm.assert_frame_equal(self.tsframe, recons)
    # GH4133 - excel output format strings
    def test_excel_date_datetime_format(self, merge_cells, engine, ext):
        """Custom date_format/datetime_format change only the cell display,
        not the values read back (GH 4133)."""
        df = DataFrame([[date(2014, 1, 31),
                         date(1999, 9, 24)],
                        [datetime(1998, 5, 26, 23, 33, 4),
                         datetime(2014, 2, 28, 13, 5, 13)]],
                       index=['DATE', 'DATETIME'], columns=['X', 'Y'])
        # Dates are promoted to datetimes on read.
        df_expected = DataFrame([[datetime(2014, 1, 31),
                                  datetime(1999, 9, 24)],
                                 [datetime(1998, 5, 26, 23, 33, 4),
                                  datetime(2014, 2, 28, 13, 5, 13)]],
                                index=['DATE', 'DATETIME'], columns=['X', 'Y'])
        with ensure_clean(ext) as filename2:
            # writer1 uses default formats, writer2 custom ones.
            writer1 = ExcelWriter(self.path)
            writer2 = ExcelWriter(filename2,
                                  date_format='DD.MM.YYYY',
                                  datetime_format='DD.MM.YYYY HH-MM-SS')
            df.to_excel(writer1, 'test1')
            df.to_excel(writer2, 'test1')
            writer1.close()
            writer2.close()
            reader1 = ExcelFile(self.path)
            reader2 = ExcelFile(filename2)
            rs1 = read_excel(reader1, 'test1', index_col=None)
            rs2 = read_excel(reader2, 'test1', index_col=None)
            # Both files must parse back to identical frames.
            tm.assert_frame_equal(rs1, rs2)
            # since the reader returns a datetime object for dates, we need
            # to use df_expected to check the result
            tm.assert_frame_equal(rs2, df_expected)
def test_to_excel_interval_no_labels(self, merge_cells, engine, ext):
# GH19242 - test writing Interval without labels
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
dtype=np.int64)
expected = frame.copy()
frame['new'] = pd.cut(frame[0], 10)
expected['new'] = pd.cut(expected[0], 10).astype(str)
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
tm.assert_frame_equal(expected, recons)
def test_to_excel_interval_labels(self, merge_cells, engine, ext):
# GH19242 - test writing Interval with labels
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
dtype=np.int64)
expected = frame.copy()
intervals = pd.cut(frame[0], 10, labels=['A', 'B', 'C', 'D', 'E',
'F', 'G', 'H', 'I', 'J'])
frame['new'] = intervals
expected['new'] = pd.Series(list(intervals))
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
tm.assert_frame_equal(expected, recons)
def test_to_excel_timedelta(self, merge_cells, engine, ext):
# GH 19242, GH9155 - test writing timedelta to xls
frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)),
columns=['A'],
dtype=np.int64
)
expected = frame.copy()
frame['new'] = frame['A'].apply(lambda x: timedelta(seconds=x))
expected['new'] = expected['A'].apply(
lambda x: timedelta(seconds=x).total_seconds() / float(86400))
frame.to_excel(self.path, 'test1')
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1')
tm.assert_frame_equal(expected, recons)
def test_to_excel_periodindex(self, merge_cells, engine, ext):
frame = self.tsframe
xp = frame.resample('M', kind='period').mean()
xp.to_excel(self.path, 'sht1')
reader = ExcelFile(self.path)
rs = read_excel(reader, 'sht1', index_col=0)
tm.assert_frame_equal(xp, rs.to_period('M'))
def test_to_excel_multiindex(self, merge_cells, engine, ext):
frame = self.frame
arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays,
names=['first', 'second'])
frame.index = new_index
frame.to_excel(self.path, 'test1', header=False)
frame.to_excel(self.path, 'test1', columns=['A', 'B'])
# round trip
frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
df = read_excel(reader, 'test1', index_col=[0, 1])
tm.assert_frame_equal(frame, df)
# GH13511
def test_to_excel_multiindex_nan_label(self, merge_cells, engine, ext):
frame = pd.DataFrame({'A': [None, 2, 3],
'B': [10, 20, 30],
'C': np.random.sample(3)})
frame = frame.set_index(['A', 'B'])
frame.to_excel(self.path, merge_cells=merge_cells)
df = read_excel(self.path, index_col=[0, 1])
tm.assert_frame_equal(frame, df)
    # Test for Issue 11328. If column indices are integers, make
    # sure they are handled correctly for either setting of
    # merge_cells
    def test_to_excel_multiindex_cols(self, merge_cells, engine, ext):
        """Integer MultiIndex columns round-trip under both merge_cells
        settings (GH 11328)."""
        frame = self.frame
        arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
        new_index = MultiIndex.from_arrays(arrays,
                                           names=['first', 'second'])
        frame.index = new_index
        new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2),
                                                 (50, 1), (50, 2)])
        frame.columns = new_cols_index
        # Without merged cells the header collapses to a single row.
        header = [0, 1]
        if not merge_cells:
            header = 0
        # round trip
        frame.to_excel(self.path, 'test1', merge_cells=merge_cells)
        reader = ExcelFile(self.path)
        df = read_excel(reader, 'test1', header=header,
                        index_col=[0, 1])
        if not merge_cells:
            # Flatten the expected columns to 'lvl0.lvl1' strings to match
            # the non-merged header format.
            fm = frame.columns.format(sparsify=False,
                                      adjoin=False, names=False)
            frame.columns = [".".join(map(str, q)) for q in zip(*fm)]
        tm.assert_frame_equal(frame, df)
def test_to_excel_multiindex_dates(self, merge_cells, engine, ext):
# try multiindex with dates
tsframe = self.tsframe.copy()
new_index = [tsframe.index, np.arange(len(tsframe.index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.index.names = ['time', 'foo']
tsframe.to_excel(self.path, 'test1', merge_cells=merge_cells)
reader = ExcelFile(self.path)
recons = read_excel(reader, 'test1',
index_col=[0, 1])
tm.assert_frame_equal(tsframe, recons)
assert recons.index.names == ('time', 'foo')
def test_to_excel_multiindex_no_write_index(self, merge_cells, engine,
ext):
# Test writing and re-reading a MI witout the index. GH 5616.
# Initial non-MI frame.
frame1 = DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]})
# Add a MI.
frame2 = frame1.copy()
multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)])
frame2.index = multi_index
# Write out to Excel without the index.
frame2.to_excel(self.path, 'test1', index=False)
# Read it back in.
reader = ExcelFile(self.path)
frame3 = read_excel(reader, 'test1')
# Test that it is the same as the initial frame.
tm.assert_frame_equal(frame1, frame3)
def test_to_excel_float_format(self, merge_cells, engine, ext):
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
df.to_excel(self.path, 'test1', float_format='%.2f')
reader = ExcelFile(self.path)
rs = read_excel(reader, 'test1', index_col=None)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
tm.assert_frame_equal(rs, xp)
def test_to_excel_output_encoding(self, merge_cells, engine, ext):
# avoid mixed inferred_type
df = DataFrame([[u'\u0192', u'\u0193', u'\u0194'],
[u'\u0195', u'\u0196', u'\u0197']],
index=[u'A\u0192', u'B'],
columns=[u'X\u0193', u'Y', u'Z'])
with ensure_clean('__tmp_to_excel_float_format__.' + ext) as filename:
df.to_excel(filename, sheet_name='TestSheet', encoding='utf8')
result = read_excel(filename, 'TestSheet', encoding='utf8')
tm.assert_frame_equal(result, df)
def test_to_excel_unicode_filename(self, merge_cells, engine, ext):
with ensure_clean(u('\u0192u.') + ext) as filename:
try:
f = open(filename, 'wb')
except UnicodeEncodeError:
pytest.skip('no unicode file names on this system')
else:
f.close()
df = DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
df.to_excel(filename, 'test1', float_format='%.2f')
reader = ExcelFile(filename)
rs = read_excel(reader, 'test1', index_col=None)
xp = DataFrame([[0.12, 0.23, 0.57],
[12.32, 123123.20, 321321.20]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
tm.assert_frame_equal(rs, xp)
# def test_to_excel_header_styling_xls(self, merge_cells, engine, ext):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import xlwt
# import xlrd
# except ImportError:
# pytest.skip
# filename = '__tmp_to_excel_header_styling_xls__.xls'
# pdf.to_excel(filename, 'test1')
# wbk = xlrd.open_workbook(filename,
# formatting_info=True)
# assert ["test1"] == wbk.sheet_names()
# ws = wbk.sheet_by_name('test1')
# assert [(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)] == ws.merged_cells
# for i in range(0, 2):
# for j in range(0, 7):
# xfx = ws.cell_xf_index(0, 0)
# cell_xf = wbk.xf_list[xfx]
# font = wbk.font_list
# assert 1 == font[cell_xf.font_index].bold
# assert 1 == cell_xf.border.top_line_style
# assert 1 == cell_xf.border.right_line_style
# assert 1 == cell_xf.border.bottom_line_style
# assert 1 == cell_xf.border.left_line_style
# assert 2 == cell_xf.alignment.hor_align
# os.remove(filename)
# def test_to_excel_header_styling_xlsx(self, merge_cells, engine, ext):
# import StringIO
# s = StringIO(
# """Date,ticker,type,value
# 2001-01-01,x,close,12.2
# 2001-01-01,x,open ,12.1
# 2001-01-01,y,close,12.2
# 2001-01-01,y,open ,12.1
# 2001-02-01,x,close,12.2
# 2001-02-01,x,open ,12.1
# 2001-02-01,y,close,12.2
# 2001-02-01,y,open ,12.1
# 2001-03-01,x,close,12.2
# 2001-03-01,x,open ,12.1
# 2001-03-01,y,close,12.2
# 2001-03-01,y,open ,12.1""")
# df = read_csv(s, parse_dates=["Date"])
# pdf = df.pivot_table(values="value", rows=["ticker"],
# cols=["Date", "type"])
# try:
# import openpyxl
# from openpyxl.cell import get_column_letter
# except ImportError:
# pytest.skip
# if openpyxl.__version__ < '1.6.1':
# pytest.skip
# # test xlsx_styling
# filename = '__tmp_to_excel_header_styling_xlsx__.xlsx'
# pdf.to_excel(filename, 'test1')
# wbk = openpyxl.load_workbook(filename)
# assert ["test1"] == wbk.get_sheet_names()
# ws = wbk.get_sheet_by_name('test1')
# xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))]
# xlsaddrs += ["A%s" % i for i in range(1, 6)]
# xlsaddrs += ["B1", "D1", "F1"]
# for xlsaddr in xlsaddrs:
# cell = ws.cell(xlsaddr)
# assert cell.style.font.bold
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.top.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.right.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.bottom.border_style)
# assert (openpyxl.style.Border.BORDER_THIN ==
# cell.style.borders.left.border_style)
# assert (openpyxl.style.Alignment.HORIZONTAL_CENTER ==
# cell.style.alignment.horizontal)
# mergedcells_addrs = ["C1", "E1", "G1"]
# for maddr in mergedcells_addrs:
# assert ws.cell(maddr).merged
# os.remove(filename)
    def test_excel_010_hemstring(self, merge_cells, engine, ext):
        """Round-trip shape checks over header/no-header and 1-3 index
        levels on each axis (legacy 0.10 behavior, xref GH 2370, GH 9794)."""
        if merge_cells:
            pytest.skip('Skip tests for merged MI format.')
        from pandas.util.testing import makeCustomDataframe as mkdf
        # ensure limited functionality in 0.10
        # override of #2370 until sorted out in 0.11
        def roundtrip(df, header=True, parser_hdr=0, index=True):
            # Write with the given options and read back with parser_hdr.
            df.to_excel(self.path, header=header,
                        merge_cells=merge_cells, index=index)
            xf = ExcelFile(self.path)
            res = read_excel(xf, xf.sheet_names[0], header=parser_hdr)
            return res
        nrows = 5
        ncols = 3
        for use_headers in (True, False):
            for i in range(1, 4):  # row multindex up to nlevel=3
                for j in range(1, 4):  # col ""
                    df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j)
                    # this if will be removed once multi column excel writing
                    # is implemented for now fixing #9794
                    if j > 1:
                        with pytest.raises(NotImplementedError):
                            res = roundtrip(df, use_headers, index=False)
                    else:
                        res = roundtrip(df, use_headers)
                        if use_headers:
                            # i index levels become ordinary columns on read.
                            assert res.shape == (nrows, ncols + i)
                        else:
                            # first row taken as columns
                            assert res.shape == (nrows - 1, ncols + i)
                        # no nans
                        for r in range(len(res.index)):
                            for c in range(len(res.columns)):
                                assert res.iloc[r, c] is not np.nan
        # Minimal single-cell sanity checks.
        res = roundtrip(DataFrame([0]))
        assert res.shape == (1, 1)
        assert res.iloc[0, 0] is not np.nan
        # header=False with no parser header keeps the index column.
        res = roundtrip(DataFrame([0]), False, None)
        assert res.shape == (1, 2)
        assert res.iloc[0, 0] is not np.nan
    def test_excel_010_hemstring_raises_NotImplementedError(self, merge_cells,
                                                            engine, ext):
        """Writing multi-level columns with header=False raises
        NotImplementedError (minimal repro of the j>1 case above)."""
        # This test was failing only for j>1 and header=False,
        # So I reproduced a simple test.
        if merge_cells:
            pytest.skip('Skip tests for merged MI format.')
        from pandas.util.testing import makeCustomDataframe as mkdf
        # ensure limited functionality in 0.10
        # override of #2370 until sorted out in 0.11
        def roundtrip2(df, header=True, parser_hdr=0, index=True):
            # Write with the given options and read back with parser_hdr.
            df.to_excel(self.path, header=header,
                        merge_cells=merge_cells, index=index)
            xf = ExcelFile(self.path)
            res = read_excel(xf, xf.sheet_names[0], header=parser_hdr)
            return res
        nrows = 5
        ncols = 3
        j = 2  # two column-index levels triggers the unimplemented path
        i = 1
        df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j)
        with pytest.raises(NotImplementedError):
            roundtrip2(df, header=False, index=False)
def test_duplicated_columns(self, merge_cells, engine, ext):
# Test for issue #5235
write_frame = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
colnames = ['A', 'B', 'B']
write_frame.columns = colnames
write_frame.to_excel(self.path, 'test1')
read_frame = read_excel(self.path, 'test1')
read_frame.columns = colnames
tm.assert_frame_equal(write_frame, read_frame)
# 11007 / #10970
write_frame = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=['A', 'B', 'A', 'B'])
write_frame.to_excel(self.path, 'test1')
read_frame = read_excel(self.path, 'test1')
read_frame.columns = ['A', 'B', 'A', 'B']
tm.assert_frame_equal(write_frame, read_frame)
# 10982
write_frame.to_excel(self.path, 'test1', index=False, header=False)
read_frame = read_excel(self.path, 'test1', header=None)
write_frame.columns = [0, 1, 2, 3]
tm.assert_frame_equal(write_frame, read_frame)
def test_swapped_columns(self, merge_cells, engine, ext):
# Test for issue #5427.
write_frame = DataFrame({'A': [1, 1, 1],
'B': [2, 2, 2]})
write_frame.to_excel(self.path, 'test1', columns=['B', 'A'])
read_frame = read_excel(self.path, 'test1', header=0)
tm.assert_series_equal(write_frame['A'], read_frame['A'])
tm.assert_series_equal(write_frame['B'], read_frame['B'])
def test_invalid_columns(self, merge_cells, engine, ext):
# 10982
write_frame = DataFrame({'A': [1, 1, 1],
'B': [2, 2, 2]})
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
write_frame.to_excel(self.path, 'test1', columns=['B', 'C'])
expected = write_frame.reindex(columns=['B', 'C'])
read_frame = read_excel(self.path, 'test1')
tm.assert_frame_equal(expected, read_frame)
with pytest.raises(KeyError):
write_frame.to_excel(self.path, 'test1', columns=['C', 'D'])
def test_comment_arg(self, merge_cells, engine, ext):
# Re issue #18735
# Test the comment argument functionality to read_excel
# Create file to read in
df = DataFrame({'A': ['one', '#one', 'one'],
'B': ['two', 'two', '#two']})
df.to_excel(self.path, 'test_c')
# Read file without comment arg
result1 = read_excel(self.path, 'test_c')
result1.iloc[1, 0] = None
result1.iloc[1, 1] = None
result1.iloc[2, 1] = None
result2 = read_excel(self.path, 'test_c', comment='#')
tm.assert_frame_equal(result1, result2)
def test_comment_default(self, merge_cells, engine, ext):
# Re issue #18735
# Test the comment argument default to read_excel
# Create file to read in
df = DataFrame({'A': ['one', '#one', 'one'],
'B': ['two', 'two', '#two']})
df.to_excel(self.path, 'test_c')
# Read file with default and explicit comment=None
result1 = read_excel(self.path, 'test_c')
result2 = read_excel(self.path, 'test_c', comment=None)
tm.assert_frame_equal(result1, result2)
def test_comment_used(self, merge_cells, engine, ext):
    # GH 18735: cells whose text starts with the comment character read
    # back as missing values.
    data = {'A': ['one', '#one', 'one'],
            'B': ['two', 'two', '#two']}
    DataFrame(data).to_excel(self.path, 'test_c')

    expected = DataFrame({'A': ['one', None, 'one'],
                          'B': ['two', None, None]})
    tm.assert_frame_equal(read_excel(self.path, 'test_c', comment='#'),
                          expected)
def test_comment_emptyline(self, merge_cells, engine, ext):
    # GH 18735: rows left entirely empty after comment stripping at the
    # end of the file are dropped.
    DataFrame({'a': ['1', '#2'], 'b': ['2', '3']}).to_excel(
        self.path, index=False)

    expected = DataFrame({'a': [1], 'b': [2]})
    tm.assert_frame_equal(read_excel(self.path, comment='#'), expected)
def test_datetimes(self, merge_cells, engine, ext):
    # GH 9139 (xref GH 9185): a column of datetimes must survive the
    # Excel write/read round-trip unchanged.
    datetimes = [datetime(2013, 1, 13, 1, 2, 3),
                 datetime(2013, 1, 13, 2, 45, 56),
                 datetime(2013, 1, 13, 4, 29, 49),
                 datetime(2013, 1, 13, 6, 13, 42),
                 datetime(2013, 1, 13, 7, 57, 35),
                 datetime(2013, 1, 13, 9, 41, 28),
                 datetime(2013, 1, 13, 11, 25, 21),
                 datetime(2013, 1, 13, 13, 9, 14),
                 datetime(2013, 1, 13, 14, 53, 7),
                 datetime(2013, 1, 13, 16, 37, 0),
                 datetime(2013, 1, 13, 18, 20, 52)]

    write_frame = DataFrame({'A': datetimes})
    write_frame.to_excel(self.path, 'Sheet1')
    read_frame = read_excel(self.path, 'Sheet1', header=0)

    tm.assert_series_equal(write_frame['A'], read_frame['A'])
# GH 7074
def test_bytes_io(self, merge_cells, engine, ext):
    # Round-trip through an in-memory buffer instead of a file path.
    buf = BytesIO()
    frame = DataFrame(np.random.randn(10, 2))

    # pass engine explicitly as there is no file path to infer from
    xl_writer = ExcelWriter(buf, engine=engine)
    frame.to_excel(xl_writer)
    xl_writer.save()

    buf.seek(0)
    tm.assert_frame_equal(frame, read_excel(buf))
# GH 8188
def test_write_lists_dict(self, merge_cells, engine, ext):
    # Non-scalar cell values (lists, dicts) are stringified on write;
    # a mixed int/float numeric column reads back as int64 here.
    df = DataFrame({'mixed': ['a', ['b', 'c'], {'d': 'e', 'f': 2}],
                    'numeric': [1, 2, 3.0],
                    'str': ['apple', 'banana', 'cherry']})
    expected = df.copy()
    expected.mixed = expected.mixed.apply(str)
    expected.numeric = expected.numeric.astype('int64')

    df.to_excel(self.path, 'Sheet1')
    read = read_excel(self.path, 'Sheet1', header=0)
    tm.assert_frame_equal(read, expected)
# GH 13347
def test_true_and_false_value_options(self, merge_cells, engine, ext):
    # true_values/false_values should map the matching strings to
    # booleans on read.
    df = pd.DataFrame([['foo', 'bar']], columns=['col1', 'col2'])
    df.to_excel(self.path)

    expected = df.replace({'foo': True, 'bar': False})
    result = read_excel(self.path, true_values=['foo'],
                        false_values=['bar'])
    tm.assert_frame_equal(result, expected)
def test_freeze_panes(self, merge_cells, engine, ext):
    # GH 15160: freeze_panes must not corrupt the written data.
    frame = DataFrame([[1, 2], [3, 4]], columns=['col1', 'col2'])
    frame.to_excel(self.path, "Sheet1", freeze_panes=(1, 1))
    tm.assert_frame_equal(frame, read_excel(self.path))
def test_path_pathlib(self, merge_cells, engine, ext):
    # Round-trip through a pathlib.Path path object.
    df = tm.makeDataFrame()
    writer = partial(df.to_excel, engine=engine)
    reader = partial(pd.read_excel)
    result = tm.round_trip_pathlib(writer, reader,
                                   path="foo.{}".format(ext))
    tm.assert_frame_equal(df, result)
def test_path_localpath(self, merge_cells, engine, ext):
    # Round-trip through a py.path.local path object.
    #
    # BUG FIX: this previously called tm.round_trip_pathlib, making it a
    # duplicate of test_path_pathlib and leaving the LocalPath code path
    # untested; use tm.round_trip_localpath as the name promises.
    df = tm.makeDataFrame()
    writer = partial(df.to_excel, engine=engine)
    reader = partial(pd.read_excel)
    result = tm.round_trip_localpath(writer, reader,
                                     path="foo.{}".format(ext))
    tm.assert_frame_equal(df, result)
@td.skip_if_no('openpyxl')
@pytest.mark.parametrize("merge_cells,ext,engine", [
    (None, '.xlsx', 'openpyxl')])
class TestOpenpyxlTests(_WriterBase):
    # Writer tests specific to the openpyxl engine.

    def test_to_excel_styleconverter(self, merge_cells, ext, engine):
        # A pandas style dict must convert to the equivalent openpyxl
        # style keyword arguments.
        from openpyxl import styles

        hstyle = {
            "font": {
                "color": '00FF0000',
                "bold": True,
            },
            "borders": {
                "top": "thin",
                "right": "thin",
                "bottom": "thin",
                "left": "thin",
            },
            "alignment": {
                "horizontal": "center",
                "vertical": "top",
            },
            "fill": {
                "patternType": 'solid',
                'fgColor': {
                    'rgb': '006666FF',
                    'tint': 0.3,
                },
            },
            "number_format": {
                "format_code": "0.00"
            },
            "protection": {
                "locked": True,
                "hidden": False,
            },
        }

        # Hand-built openpyxl objects matching hstyle above.
        font_color = styles.Color('00FF0000')
        font = styles.Font(bold=True, color=font_color)
        side = styles.Side(style=styles.borders.BORDER_THIN)
        border = styles.Border(top=side, right=side, bottom=side, left=side)
        alignment = styles.Alignment(horizontal='center', vertical='top')
        fill_color = styles.Color(rgb='006666FF', tint=0.3)
        fill = styles.PatternFill(patternType='solid', fgColor=fill_color)
        number_format = '0.00'
        protection = styles.Protection(locked=True, hidden=False)

        kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle)
        assert kw['font'] == font
        assert kw['border'] == border
        assert kw['alignment'] == alignment
        assert kw['fill'] == fill
        assert kw['number_format'] == number_format
        assert kw['protection'] == protection

    def test_write_cells_merge_styled(self, merge_cells, ext, engine):
        # Writing a styled merge cell over previously-written cells:
        # both anchor cells end up with the merge cell's font.
        from pandas.io.formats.excel import ExcelCell

        sheet_name = 'merge_styled'

        sty_b1 = {'font': {'color': '00FF0000'}}
        sty_a2 = {'font': {'color': '0000FF00'}}

        initial_cells = [
            ExcelCell(col=1, row=0, val=42, style=sty_b1),
            ExcelCell(col=0, row=1, val=99, style=sty_a2),
        ]

        sty_merged = {'font': {'color': '000000FF', 'bold': True}}
        sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged)
        openpyxl_sty_merged = sty_kwargs['font']
        merge_cells = [
            ExcelCell(col=0, row=0, val='pandas',
                      mergestart=1, mergeend=1, style=sty_merged),
        ]

        with ensure_clean(ext) as path:
            writer = _OpenpyxlWriter(path)
            writer.write_cells(initial_cells, sheet_name=sheet_name)
            writer.write_cells(merge_cells, sheet_name=sheet_name)

            wks = writer.sheets[sheet_name]
            xcell_b1 = wks['B1']
            xcell_a2 = wks['A2']
            assert xcell_b1.font == openpyxl_sty_merged
            assert xcell_a2.font == openpyxl_sty_merged

    @pytest.mark.parametrize("mode,expected", [
        ('w', ['baz']), ('a', ['foo', 'bar', 'baz'])])
    def test_write_append_mode(self, merge_cells, ext, engine, mode, expected):
        # mode='a' must keep pre-existing sheets; mode='w' replaces them.
        import openpyxl
        df = DataFrame([1], columns=['baz'])

        with ensure_clean(ext) as f:
            wb = openpyxl.Workbook()
            wb.worksheets[0].title = 'foo'
            wb.worksheets[0]['A1'].value = 'foo'
            wb.create_sheet('bar')
            wb.worksheets[1]['A1'].value = 'bar'
            wb.save(f)

            writer = ExcelWriter(f, engine=engine, mode=mode)
            df.to_excel(writer, sheet_name='baz', index=False)
            writer.save()

            wb2 = openpyxl.load_workbook(f)
            result = [sheet.title for sheet in wb2.worksheets]
            assert result == expected

            for index, cell_value in enumerate(expected):
                assert wb2.worksheets[index]['A1'].value == cell_value
@td.skip_if_no('xlwt')
@pytest.mark.parametrize("merge_cells,ext,engine", [
    (None, '.xls', 'xlwt')])
class TestXlwtTests(_WriterBase):
    # Writer tests specific to the legacy xlwt (.xls) engine.

    def test_excel_raise_error_on_multiindex_columns_and_no_index(
            self, merge_cells, ext, engine):
        # GH 9794: MultiIndex as columns is not yet implemented when the
        # index is not written.
        cols = MultiIndex.from_tuples([('site', ''),
                                       ('2014', 'height'),
                                       ('2014', 'weight')])
        df = DataFrame(np.random.randn(10, 3), columns=cols)
        with pytest.raises(NotImplementedError):
            with ensure_clean(ext) as path:
                df.to_excel(path, index=False)

    def test_excel_multiindex_columns_and_index_true(self, merge_cells, ext,
                                                     engine):
        # MultiIndex columns are supported as long as the index is
        # written; assert no error.
        cols = MultiIndex.from_tuples([('site', ''),
                                       ('2014', 'height'),
                                       ('2014', 'weight')])
        df = pd.DataFrame(np.random.randn(10, 3), columns=cols)
        with ensure_clean(ext) as path:
            df.to_excel(path, index=True)

    def test_excel_multiindex_index(self, merge_cells, ext, engine):
        # GH 9794: MultiIndex as the row index works; assert no error.
        cols = MultiIndex.from_tuples([('site', ''),
                                       ('2014', 'height'),
                                       ('2014', 'weight')])
        df = DataFrame(np.random.randn(3, 10), index=cols)
        with ensure_clean(ext) as path:
            df.to_excel(path, index=False)

    def test_to_excel_styleconverter(self, merge_cells, ext, engine):
        # A pandas style dict must convert to the matching xlwt XFStyle.
        import xlwt

        hstyle = {"font": {"bold": True},
                  "borders": {"top": "thin",
                              "right": "thin",
                              "bottom": "thin",
                              "left": "thin"},
                  "alignment": {"horizontal": "center", "vertical": "top"}}

        xls_style = _XlwtWriter._convert_to_style(hstyle)
        assert xls_style.font.bold
        assert xlwt.Borders.THIN == xls_style.borders.top
        assert xlwt.Borders.THIN == xls_style.borders.right
        assert xlwt.Borders.THIN == xls_style.borders.bottom
        assert xlwt.Borders.THIN == xls_style.borders.left
        assert xlwt.Alignment.HORZ_CENTER == xls_style.alignment.horz
        assert xlwt.Alignment.VERT_TOP == xls_style.alignment.vert

    def test_write_append_mode_raises(self, merge_cells, ext, engine):
        # xlwt cannot append to an existing workbook.
        msg = "Append mode is not supported with xlwt!"

        with ensure_clean(ext) as f:
            with tm.assert_raises_regex(ValueError, msg):
                ExcelWriter(f, engine=engine, mode='a')
@td.skip_if_no('xlsxwriter')
@pytest.mark.parametrize("merge_cells,ext,engine", [
    (None, '.xlsx', 'xlsxwriter')])
class TestXlsxWriterTests(_WriterBase):
    # Writer tests specific to the xlsxwriter engine.

    @td.skip_if_no('openpyxl')
    def test_column_format(self, merge_cells, ext, engine):
        # GH 9167: column-level number formats set through the raw
        # xlsxwriter API must be applied to the written cells.
        # Applicable to xlsxwriter only.
        with warnings.catch_warnings():
            # Ignore the openpyxl lxml warning.
            warnings.simplefilter("ignore")
            import openpyxl

        with ensure_clean(ext) as path:
            frame = DataFrame({'A': [123456, 123456],
                               'B': [123456, 123456]})

            writer = ExcelWriter(path)
            frame.to_excel(writer)

            # Add a number format to col B and ensure it is applied to cells.
            num_format = '#,##0'
            write_workbook = writer.book
            write_worksheet = write_workbook.worksheets()[0]
            col_format = write_workbook.add_format({'num_format': num_format})
            write_worksheet.set_column('B:B', None, col_format)
            writer.save()

            read_workbook = openpyxl.load_workbook(path)
            try:
                read_worksheet = read_workbook['Sheet1']
            except TypeError:
                # compat: older openpyxl looks sheets up by name
                read_worksheet = read_workbook.get_sheet_by_name(name='Sheet1')

            # Get the number format from the cell.
            try:
                cell = read_worksheet['B2']
            except TypeError:
                # compat: older openpyxl indexes cells via .cell()
                cell = read_worksheet.cell('B2')
            try:
                read_num_format = cell.number_format
            except Exception:
                # compat: very old openpyxl exposes the format via .style
                read_num_format = cell.style.number_format._format_code

            assert read_num_format == num_format

    def test_write_append_mode_raises(self, merge_cells, ext, engine):
        # xlsxwriter cannot append to an existing workbook.
        msg = "Append mode is not supported with xlsxwriter!"

        with ensure_clean(ext) as f:
            with tm.assert_raises_regex(ValueError, msg):
                ExcelWriter(f, engine=engine, mode='a')
class TestExcelWriterEngineTests(object):
    # Engine selection / dispatch behaviour of ExcelWriter.

    @pytest.mark.parametrize('klass,ext', [
        pytest.param(_XlsxWriter, '.xlsx', marks=pytest.mark.skipif(
            not td.safe_import('xlsxwriter'), reason='No xlsxwriter')),
        pytest.param(_OpenpyxlWriter, '.xlsx', marks=pytest.mark.skipif(
            not td.safe_import('openpyxl'), reason='No openpyxl')),
        pytest.param(_XlwtWriter, '.xls', marks=pytest.mark.skipif(
            not td.safe_import('xlwt'), reason='No xlwt'))
    ])
    def test_ExcelWriter_dispatch(self, klass, ext):
        # The file extension alone should pick the right writer class.
        with ensure_clean(ext) as path:
            writer = ExcelWriter(path)
            if ext == '.xlsx' and td.safe_import('xlsxwriter'):
                # xlsxwriter has preference over openpyxl if both installed
                assert isinstance(writer, _XlsxWriter)
            else:
                assert isinstance(writer, klass)

    def test_ExcelWriter_dispatch_raises(self):
        # Unknown extension -> no engine can be inferred.
        with tm.assert_raises_regex(ValueError, 'No engine'):
            ExcelWriter('nothing')

    def test_register_writer(self):
        # some awkward mocking to test out dispatch and such actually works
        called_save = []
        called_write_cells = []

        class DummyClass(ExcelWriter):
            called_save = False
            called_write_cells = False
            supported_extensions = ['test', 'xlsx', 'xls']
            engine = 'dummy'

            def save(self):
                called_save.append(True)

            def write_cells(self, *args, **kwargs):
                called_write_cells.append(True)

        def check_called(func):
            # Run func and verify it hit the dummy writer's hooks.
            func()
            assert len(called_save) >= 1
            assert len(called_write_cells) >= 1
            del called_save[:]
            del called_write_cells[:]

        with pd.option_context('io.excel.xlsx.writer', 'dummy'):
            register_writer(DummyClass)
            writer = ExcelWriter('something.test')
            assert isinstance(writer, DummyClass)

            df = tm.makeCustomDataframe(1, 1)

            with catch_warnings(record=True):
                panel = tm.makePanel()
                func = lambda: df.to_excel('something.test')
                check_called(func)
                check_called(lambda: panel.to_excel('something.test'))
                check_called(lambda: df.to_excel('something.xlsx'))
                check_called(
                    lambda: df.to_excel(
                        'something.xls', engine='dummy'))
@pytest.mark.parametrize('engine', [
    pytest.param('xlwt',
                 marks=pytest.mark.xfail(reason='xlwt does not support '
                                                'openpyxl-compatible '
                                                'style dicts')),
    'xlsxwriter',
    'openpyxl',
])
def test_styler_to_excel(engine):
    # Write styled output via Styler.to_excel / ExcelFormatter, then
    # verify the styling by re-reading the workbook with openpyxl.

    def style(df):
        # One CSS declaration per targeted cell.
        # XXX: RGB colors not supported in xlwt
        return DataFrame([['font-weight: bold', '', ''],
                          ['', 'color: blue', ''],
                          ['', '', 'text-decoration: underline'],
                          ['border-style: solid', '', ''],
                          ['', 'font-style: italic', ''],
                          ['', '', 'text-align: right'],
                          ['background-color: red', '', ''],
                          ['', '', ''],
                          ['', '', ''],
                          ['', '', '']],
                         index=df.index, columns=df.columns)

    def assert_equal_style(cell1, cell2):
        # XXX: should find a better way to check equality
        assert cell1.alignment.__dict__ == cell2.alignment.__dict__
        assert cell1.border.__dict__ == cell2.border.__dict__
        assert cell1.fill.__dict__ == cell2.fill.__dict__
        assert cell1.font.__dict__ == cell2.font.__dict__
        assert cell1.number_format == cell2.number_format
        assert cell1.protection.__dict__ == cell2.protection.__dict__

    def custom_converter(css):
        # use bold iff there is custom style attached to the cell
        if css.strip(' \n;'):
            return {'font': {'bold': True}}
        return {}

    pytest.importorskip('jinja2')
    pytest.importorskip(engine)

    # Prepare spreadsheets

    df = DataFrame(np.random.randn(10, 3))
    with ensure_clean('.xlsx' if engine != 'xlwt' else '.xls') as path:
        writer = ExcelWriter(path, engine=engine)
        df.to_excel(writer, sheet_name='frame')
        df.style.to_excel(writer, sheet_name='unstyled')
        styled = df.style.apply(style, axis=None)
        styled.to_excel(writer, sheet_name='styled')
        ExcelFormatter(styled, style_converter=custom_converter).write(
            writer, sheet_name='custom')
        writer.save()

        if engine not in ('openpyxl', 'xlsxwriter'):
            # For other engines, we only smoke test
            return
        openpyxl = pytest.importorskip('openpyxl')
        wb = openpyxl.load_workbook(path)

        # (1) compare DataFrame.to_excel and Styler.to_excel when unstyled
        n_cells = 0
        for col1, col2 in zip(wb['frame'].columns,
                              wb['unstyled'].columns):
            assert len(col1) == len(col2)
            for cell1, cell2 in zip(col1, col2):
                assert cell1.value == cell2.value
                assert_equal_style(cell1, cell2)
                n_cells += 1

        # ensure iteration actually happened:
        assert n_cells == (10 + 1) * (3 + 1)

        # (2) check styling with default converter

        # XXX: openpyxl (as at 2.4) prefixes colors with 00, xlsxwriter with FF
        alpha = '00' if engine == 'openpyxl' else 'FF'

        n_cells = 0
        for col1, col2 in zip(wb['frame'].columns,
                              wb['styled'].columns):
            assert len(col1) == len(col2)
            for cell1, cell2 in zip(col1, col2):
                ref = '%s%d' % (cell2.column, cell2.row)
                # XXX: this isn't as strong a test as ideal; we should
                #      confirm that differences are exclusive
                if ref == 'B2':
                    assert not cell1.font.bold
                    assert cell2.font.bold
                elif ref == 'C3':
                    assert cell1.font.color.rgb != cell2.font.color.rgb
                    assert cell2.font.color.rgb == alpha + '0000FF'
                elif ref == 'D4':
                    # This fails with engine=xlsxwriter due to
                    # https://bitbucket.org/openpyxl/openpyxl/issues/800
                    if engine == 'xlsxwriter' \
                            and (LooseVersion(openpyxl.__version__) <
                                 LooseVersion('2.4.6')):
                        pass
                    else:
                        assert cell1.font.underline != cell2.font.underline
                        assert cell2.font.underline == 'single'
                elif ref == 'B5':
                    assert not cell1.border.left.style
                    assert (cell2.border.top.style ==
                            cell2.border.right.style ==
                            cell2.border.bottom.style ==
                            cell2.border.left.style ==
                            'medium')
                elif ref == 'C6':
                    assert not cell1.font.italic
                    assert cell2.font.italic
                elif ref == 'D7':
                    assert (cell1.alignment.horizontal !=
                            cell2.alignment.horizontal)
                    assert cell2.alignment.horizontal == 'right'
                elif ref == 'B8':
                    assert cell1.fill.fgColor.rgb != cell2.fill.fgColor.rgb
                    assert cell1.fill.patternType != cell2.fill.patternType
                    assert cell2.fill.fgColor.rgb == alpha + 'FF0000'
                    assert cell2.fill.patternType == 'solid'
                else:
                    assert_equal_style(cell1, cell2)

                assert cell1.value == cell2.value
                n_cells += 1

        assert n_cells == (10 + 1) * (3 + 1)

        # (3) check styling with custom converter
        n_cells = 0
        for col1, col2 in zip(wb['frame'].columns,
                              wb['custom'].columns):
            assert len(col1) == len(col2)
            for cell1, cell2 in zip(col1, col2):
                ref = '%s%d' % (cell2.column, cell2.row)
                if ref in ('B2', 'C3', 'D4', 'B5', 'C6', 'D7', 'B8'):
                    assert not cell1.font.bold
                    assert cell2.font.bold
                else:
                    assert_equal_style(cell1, cell2)

                assert cell1.value == cell2.value
                n_cells += 1

        assert n_cells == (10 + 1) * (3 + 1)
@td.skip_if_no('openpyxl')
@pytest.mark.skipif(not PY36, reason='requires fspath')
class TestFSPath(object):
    # os.fspath (PEP 519) support on ExcelFile and ExcelWriter.

    def test_excelfile_fspath(self):
        with tm.ensure_clean('foo.xlsx') as path:
            DataFrame({"A": [1, 2]}).to_excel(path)
            assert os.fspath(ExcelFile(path)) == path

    def test_excelwriter_fspath(self):
        with tm.ensure_clean('foo.xlsx') as path:
            assert os.fspath(ExcelWriter(path)) == str(path)
|
bsd-3-clause
|
enigmampc/catalyst
|
tests/pipeline/test_frameload.py
|
1
|
7709
|
"""
Tests for catalyst.pipeline.loaders.frame.DataFrameLoader.
"""
from unittest import TestCase
from mock import patch
from numpy import arange, ones
from numpy.testing import assert_array_equal
from pandas import (
DataFrame,
DatetimeIndex,
Int64Index,
)
from catalyst.lib.adjustment import (
ADD,
Float64Add,
Float64Multiply,
Float64Overwrite,
MULTIPLY,
OVERWRITE,
)
from catalyst.pipeline.data import USEquityPricing
from catalyst.pipeline.loaders.frame import (
DataFrameLoader,
)
from catalyst.utils.calendars import get_calendar
class DataFrameLoaderTestCase(TestCase):
    """Tests for DataFrameLoader: input validation, baseline window
    traversal, and adjustment formatting/application."""

    def setUp(self):
        # A 20-day x 5-sid grid on the NYSE calendar, with an all-True mask.
        self.trading_day = get_calendar("NYSE").day
        self.nsids = 5
        self.ndates = 20
        self.sids = Int64Index(range(self.nsids))
        self.dates = DatetimeIndex(
            start='2014-01-02',
            freq=self.trading_day,
            periods=self.ndates,
        )
        self.mask = ones((len(self.dates), len(self.sids)), dtype=bool)

    def tearDown(self):
        pass

    def test_bad_input(self):
        # Requesting a column the loader doesn't serve, or more than one
        # column, must raise ValueError.
        data = arange(100).reshape(self.ndates, self.nsids)
        baseline = DataFrame(data, index=self.dates, columns=self.sids)
        loader = DataFrameLoader(
            USEquityPricing.close,
            baseline,
        )

        with self.assertRaises(ValueError):
            # Wrong column.
            loader.load_adjusted_array(
                [USEquityPricing.open], self.dates, self.sids, self.mask
            )

        with self.assertRaises(ValueError):
            # Too many columns.
            loader.load_adjusted_array(
                [USEquityPricing.open, USEquityPricing.close],
                self.dates,
                self.sids,
                self.mask,
            )

    def test_baseline(self):
        # With no adjustments, traversal windows are plain slices of the
        # baseline values.
        data = arange(100).reshape(self.ndates, self.nsids)
        baseline = DataFrame(data, index=self.dates, columns=self.sids)
        loader = DataFrameLoader(USEquityPricing.close, baseline)

        dates_slice = slice(None, 10, None)
        sids_slice = slice(1, 3, None)
        [adj_array] = loader.load_adjusted_array(
            [USEquityPricing.close],
            self.dates[dates_slice],
            self.sids[sids_slice],
            self.mask[dates_slice, sids_slice],
        ).values()

        for idx, window in enumerate(adj_array.traverse(window_length=3)):
            expected = baseline.values[dates_slice, sids_slice][idx:idx + 3]
            assert_array_equal(window, expected)

    def test_adjustments(self):
        data = arange(100).reshape(self.ndates, self.nsids)
        baseline = DataFrame(data, index=self.dates, columns=self.sids)

        # Use the dates from index 10 on and sids 1-3.
        dates_slice = slice(10, None, None)
        sids_slice = slice(1, 4, None)

        # Adjustments that should actually affect the output.
        relevant_adjustments = [
            {
                'sid': 1,
                'start_date': None,
                'end_date': self.dates[15],
                'apply_date': self.dates[16],
                'value': 0.5,
                'kind': MULTIPLY,
            },
            {
                'sid': 2,
                'start_date': self.dates[5],
                'end_date': self.dates[15],
                'apply_date': self.dates[16],
                'value': 1.0,
                'kind': ADD,
            },
            {
                'sid': 2,
                'start_date': self.dates[15],
                'end_date': self.dates[16],
                'apply_date': self.dates[17],
                'value': 1.0,
                'kind': ADD,
            },
            {
                'sid': 3,
                'start_date': self.dates[16],
                'end_date': self.dates[17],
                'apply_date': self.dates[18],
                'value': 99.0,
                'kind': OVERWRITE,
            },
        ]

        # These adjustments shouldn't affect the output.
        irrelevant_adjustments = [
            {  # Sid Not Requested
                'sid': 0,
                'start_date': self.dates[16],
                'end_date': self.dates[17],
                'apply_date': self.dates[18],
                'value': -9999.0,
                'kind': OVERWRITE,
            },
            {  # Sid Unknown
                'sid': 9999,
                'start_date': self.dates[16],
                'end_date': self.dates[17],
                'apply_date': self.dates[18],
                'value': -9999.0,
                'kind': OVERWRITE,
            },
            {  # Date Not Requested
                'sid': 2,
                'start_date': self.dates[1],
                'end_date': self.dates[2],
                'apply_date': self.dates[3],
                'value': -9999.0,
                'kind': OVERWRITE,
            },
            {  # Date Before Known Data
                'sid': 2,
                'start_date': self.dates[0] - (2 * self.trading_day),
                'end_date': self.dates[0] - self.trading_day,
                'apply_date': self.dates[0] - self.trading_day,
                'value': -9999.0,
                'kind': OVERWRITE,
            },
            {  # Date After Known Data
                'sid': 2,
                'start_date': self.dates[-1] + self.trading_day,
                'end_date': self.dates[-1] + (2 * self.trading_day),
                'apply_date': self.dates[-1] + (3 * self.trading_day),
                'value': -9999.0,
                'kind': OVERWRITE,
            },
        ]

        adjustments = DataFrame(relevant_adjustments + irrelevant_adjustments)
        loader = DataFrameLoader(
            USEquityPricing.close,
            baseline,
            adjustments=adjustments,
        )

        expected_baseline = baseline.iloc[dates_slice, sids_slice]

        formatted_adjustments = loader.format_adjustments(
            self.dates[dates_slice],
            self.sids[sids_slice],
        )
        # Keys are row indices (relative to dates_slice) of the apply
        # dates; row/col bounds are relative to the requested window.
        expected_formatted_adjustments = {
            6: [
                Float64Multiply(
                    first_row=0,
                    last_row=5,
                    first_col=0,
                    last_col=0,
                    value=0.5,
                ),
                Float64Add(
                    first_row=0,
                    last_row=5,
                    first_col=1,
                    last_col=1,
                    value=1.0,
                ),
            ],
            7: [
                Float64Add(
                    first_row=5,
                    last_row=6,
                    first_col=1,
                    last_col=1,
                    value=1.0,
                ),
            ],
            8: [
                Float64Overwrite(
                    first_row=6,
                    last_row=7,
                    first_col=2,
                    last_col=2,
                    value=99.0,
                )
            ],
        }
        self.assertEqual(formatted_adjustments, expected_formatted_adjustments)

        mask = self.mask[dates_slice, sids_slice]
        # Patch AdjustedArray so we can inspect the constructor call
        # without depending on its behaviour.
        with patch('catalyst.pipeline.loaders.frame.AdjustedArray') as m:
            loader.load_adjusted_array(
                columns=[USEquityPricing.close],
                dates=self.dates[dates_slice],
                assets=self.sids[sids_slice],
                mask=mask,
            )

        self.assertEqual(m.call_count, 1)

        args, kwargs = m.call_args
        assert_array_equal(kwargs['data'], expected_baseline.values)
        assert_array_equal(kwargs['mask'], mask)
        self.assertEqual(kwargs['adjustments'], expected_formatted_adjustments)
|
apache-2.0
|
TakayukiSakai/tensorflow
|
tensorflow/contrib/learn/python/learn/io/pandas_io.py
|
2
|
2243
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
# Map pandas dtype names onto the coarse categories accepted by the
# extraction helpers below.
# NOTE(review): 'bool' maps to 'i' while the integer dtypes map to
# 'int' -- looks inconsistent, preserved as-is; confirm against callers.
PANDAS_DTYPES = {
    'int8': 'int',
    'int16': 'int',
    'int32': 'int',
    'int64': 'int',
    'uint8': 'int',
    'uint16': 'int',
    'uint32': 'int',
    'uint64': 'int',
    'float16': 'float',
    'float32': 'float',
    'float64': 'float',
    'bool': 'i'
}


def extract_pandas_data(data):
  """Extract data from pandas.DataFrame for predictors.

  Args:
    data: A `pandas.DataFrame`; any other object is passed through
      untouched.

  Returns:
    A float numpy array when `data` is a DataFrame whose columns are all
    int/float/bool dtypes; otherwise `data` unchanged.

  Raises:
    ValueError: if the DataFrame contains a column of any other dtype.
  """
  if not isinstance(data, pd.DataFrame):
    return data

  unsupported = [d for d in data.dtypes if d.name not in PANDAS_DTYPES]
  if unsupported:
    raise ValueError('Data types for data must be int, float, or bool.')
  return data.values.astype('float')
def extract_pandas_matrix(data):
  """Extracts numpy matrix from pandas DataFrame.

  Args:
    data: A `pandas.DataFrame`; any other object is returned unchanged.

  Returns:
    The DataFrame's values as a numpy array, or `data` itself when it is
    not a DataFrame.
  """
  if not isinstance(data, pd.DataFrame):
    return data
  # BUG FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and
  # removed in 1.0; .values is the long-standing equivalent.
  return data.values
def extract_pandas_labels(labels):
  """Extract data from pandas.DataFrame for labels.

  Args:
    labels: A single-column `pandas.DataFrame` with an int/float/bool
      dtype; any other object is returned unchanged.
      NOTE(review): the original comment claimed pandas.Series is a
      DataFrame subclass; in modern pandas it is not, so Series inputs
      fall through unchanged here -- confirm intent.

  Returns:
    The underlying values array for a supported DataFrame, otherwise
    `labels` itself.

  Raises:
    ValueError: if the DataFrame has more than one column, or a column
      of an unsupported dtype.
  """
  if not isinstance(labels, pd.DataFrame):
    return labels

  if len(labels.columns) > 1:
    raise ValueError('Only one column for labels is allowed.')
  if all(dtype.name in PANDAS_DTYPES for dtype in labels.dtypes):
    return labels.values
  raise ValueError('Data types for labels must be int, float, or bool.')
|
apache-2.0
|
tectronics/mpmath
|
mpmath/visualization.py
|
6
|
9486
|
"""
Plotting (requires matplotlib)
"""
from colorsys import hsv_to_rgb, hls_to_rgb
from .libmp import NoConvergence
from .libmp.backend import xrange
class VisualizationMethods(object):
    # Mixin namespace for plotting; plot/cplot/splot and
    # default_color_function are attached to it at module bottom.
    # Exceptions silently skipped while sampling a function for a plot.
    plot_ignore = (ValueError, ArithmeticError, ZeroDivisionError, NoConvergence)
def plot(ctx, f, xlim=[-5,5], ylim=None, points=200, file=None, dpi=None,
         singularities=[], axes=None):
    r"""
    Shows a simple 2D plot of a function `f(x)` or list of functions
    `[f_0(x), f_1(x), \ldots, f_n(x)]` over a given interval
    specified by *xlim*. Some examples::

        plot(lambda x: exp(x)*li(x), [1, 4])
        plot([cos, sin], [-4, 4])
        plot([fresnels, fresnelc], [-4, 4])
        plot([sqrt, cbrt], [-4, 4])
        plot(lambda t: zeta(0.5+t*j), [-20, 20])
        plot([floor, ceil, abs, sign], [-5, 5])

    Points where the function raises a numerical exception or
    returns an infinite value are removed from the graph.

    Singularities can also be excluded explicitly
    as follows (useful for removing erroneous vertical lines)::

        plot(cot, ylim=[-5, 5])   # bad
        plot(cot, ylim=[-5, 5], singularities=[-pi, 0, pi])   # good

    For parts where the function assumes complex values, the
    real part is plotted with dashes and the imaginary part
    is plotted with dots.

    .. note :: This function requires matplotlib (pylab).
    """
    if file:
        # Rendering to file: ignore any user-supplied axes.
        axes = None
    fig = None
    if not axes:
        import pylab
        fig = pylab.figure()
        axes = fig.add_subplot(111)
    if not isinstance(f, (tuple, list)):
        f = [f]
    a, b = xlim
    colors = ['b', 'r', 'g', 'm', 'k']
    for n, func in enumerate(f):
        x = ctx.arange(a, b, (b-a)/float(points))
        # Samples are accumulated into "segments": a new segment starts
        # at every discontinuity (exception, singularity, or switch
        # between real- and complex-valued output).
        segments = []
        segment = []
        in_complex = False
        for i in xrange(len(x)):
            try:
                if i != 0:
                    for sing in singularities:
                        # Break the curve where it crosses a declared
                        # singularity.
                        if x[i-1] <= sing and x[i] >= sing:
                            raise ValueError
                v = func(x[i])
                if ctx.isnan(v) or abs(v) > 1e300:
                    raise ValueError
                if hasattr(v, "imag") and v.imag:
                    re = float(v.real)
                    im = float(v.imag)
                    if not in_complex:
                        in_complex = True
                        segments.append(segment)
                        segment = []
                    # Complex samples are stored as (x, re, im) triples.
                    segment.append((float(x[i]), re, im))
                else:
                    if in_complex:
                        in_complex = False
                        segments.append(segment)
                        segment = []
                    if hasattr(v, "real"):
                        v = v.real
                    segment.append((float(x[i]), v))
            except ctx.plot_ignore:
                if segment:
                    segments.append(segment)
                    segment = []
        if segment:
            segments.append(segment)
        for segment in segments:
            x = [s[0] for s in segment]
            y = [s[1] for s in segment]
            if not x:
                continue
            c = colors[n % len(colors)]
            if len(segment[0]) == 3:
                # Complex segment: real part dashed, imaginary dotted.
                z = [s[2] for s in segment]
                axes.plot(x, y, '--'+c, linewidth=3)
                axes.plot(x, z, ':'+c, linewidth=3)
            else:
                axes.plot(x, y, c, linewidth=3)
    axes.set_xlim([float(_) for _ in xlim])
    if ylim:
        axes.set_ylim([float(_) for _ in ylim])
    axes.set_xlabel('x')
    axes.set_ylabel('f(x)')
    axes.grid(True)
    if fig:
        if file:
            pylab.savefig(file, dpi=dpi)
        else:
            pylab.show()
def default_color_function(ctx, z):
    """Map a complex number to an RGB triple: hue from the argument of
    *z*, lightness from its magnitude (saturating towards white).

    Infinities map to white, NaNs to grey.
    """
    if ctx.isinf(z):
        return (1.0, 1.0, 1.0)
    if ctx.isnan(z):
        return (0.5, 0.5, 0.5)
    # BUG FIX: removed dead local ``pi = 3.1415926535898`` -- it was
    # never read; the code below uses ctx.pi.
    # Hue: phase normalized to [0, 1), rotated by half a turn.
    a = (float(ctx.arg(z)) + ctx.pi) / (2*ctx.pi)
    a = (a + 0.5) % 1.0
    # Lightness: grows with |z|, approaching 1 for large magnitudes.
    b = 1.0 - float(1/(1.0+abs(z)**0.3))
    return hls_to_rgb(a, b, 0.8)
def cplot(ctx, f, re=[-5,5], im=[-5,5], points=2000, color=None,
          verbose=False, file=None, dpi=None, axes=None):
    """
    Plots the given complex-valued function *f* over a rectangular part
    of the complex plane specified by the pairs of intervals *re* and *im*.
    For example::

        cplot(lambda z: z, [-2, 2], [-10, 10])
        cplot(exp)
        cplot(zeta, [0, 1], [0, 50])

    By default, the complex argument (phase) is shown as color (hue) and
    the magnitude is show as brightness. You can also supply a
    custom color function (*color*). This function should take a
    complex number as input and return an RGB 3-tuple containing
    floats in the range 0.0-1.0.

    To obtain a sharp image, the number of points may need to be
    increased to 100,000 or thereabout. Since evaluating the
    function that many times is likely to be slow, the 'verbose'
    option is useful to display progress.

    .. note :: This function requires matplotlib (pylab).
    """
    if color is None:
        color = ctx.default_color_function
    import pylab
    if file:
        # Rendering to file: ignore any user-supplied axes.
        axes = None
    fig = None
    if not axes:
        fig = pylab.figure()
        axes = fig.add_subplot(111)
    rea, reb = re
    ima, imb = im
    dre = reb - rea
    dim = imb - ima
    # Grid dimensions chosen so pixels match the rectangle's aspect ratio
    # while keeping roughly ``points`` samples in total.
    M = int(ctx.sqrt(points*dre/dim)+1)
    N = int(ctx.sqrt(points*dim/dre)+1)
    x = pylab.linspace(rea, reb, M)
    y = pylab.linspace(ima, imb, N)
    # Note: we have to be careful to get the right rotation.
    # Test with these plots:
    #   cplot(lambda z: z if z.real < 0 else 0)
    #   cplot(lambda z: z if z.imag < 0 else 0)
    w = pylab.zeros((N, M, 3))
    for n in xrange(N):
        for m in xrange(M):
            z = ctx.mpc(x[m], y[n])
            try:
                v = color(f(z))
            except ctx.plot_ignore:
                # Evaluation failed at this sample; paint the pixel grey.
                v = (0.5, 0.5, 0.5)
            w[n,m] = v
        if verbose:
            print(n, "of", N)
    rea, reb, ima, imb = [float(_) for _ in [rea, reb, ima, imb]]
    axes.imshow(w, extent=(rea, reb, ima, imb), origin='lower')
    axes.set_xlabel('Re(z)')
    axes.set_ylabel('Im(z)')
    if fig:
        if file:
            pylab.savefig(file, dpi=dpi)
        else:
            pylab.show()
def splot(ctx, f, u=[-5,5], v=[-5,5], points=100, keep_aspect=True, \
          wireframe=False, file=None, dpi=None, axes=None):
    """
    Plots the surface defined by `f`.

    If `f` returns a single component, then this plots the surface
    defined by `z = f(x,y)` over the rectangular domain with
    `x = u` and `y = v`.

    If `f` returns three components, then this plots the parametric
    surface `x, y, z = f(u,v)` over the pairs of intervals `u` and `v`.

    For example, to plot a simple function::

        >>> from mpmath import *
        >>> f = lambda x, y: sin(x+y)*cos(y)
        >>> splot(f, [-pi,pi], [-pi,pi])    # doctest: +SKIP

    Plotting a donut::

        >>> r, R = 1, 2.5
        >>> f = lambda u, v: [r*cos(u), (R+r*sin(u))*cos(v), (R+r*sin(u))*sin(v)]
        >>> splot(f, [0, 2*pi], [0, 2*pi])    # doctest: +SKIP

    .. note :: This function requires matplotlib (pylab) 0.98.5.3 or higher.
    """
    import pylab
    import mpl_toolkits.mplot3d as mplot3d
    if file:
        # Rendering to file: ignore any user-supplied axes.
        axes = None
    fig = None
    if not axes:
        fig = pylab.figure()
        axes = mplot3d.axes3d.Axes3D(fig)
    ua, ub = u
    va, vb = v
    # NOTE(review): du and dv are computed but never used below.
    du = ub - ua
    dv = vb - va
    if not isinstance(points, (list, tuple)):
        points = [points, points]
    M, N = points
    u = pylab.linspace(ua, ub, M)
    v = pylab.linspace(va, vb, N)
    x, y, z = [pylab.zeros((M, N)) for i in xrange(3)]
    # Per-coordinate [min, max] ranges, used for aspect normalization.
    xab, yab, zab = [[0, 0] for i in xrange(3)]
    for n in xrange(N):
        for m in xrange(M):
            fdata = f(ctx.convert(u[m]), ctx.convert(v[n]))
            try:
                # Parametric surface: f returned three components.
                x[m,n], y[m,n], z[m,n] = fdata
            except TypeError:
                # Scalar surface: z = f(x, y).
                x[m,n], y[m,n], z[m,n] = u[m], v[n], fdata
            for c, cab in [(x[m,n], xab), (y[m,n], yab), (z[m,n], zab)]:
                if c < cab[0]:
                    cab[0] = c
                if c > cab[1]:
                    cab[1] = c
    if wireframe:
        axes.plot_wireframe(x, y, z, rstride=4, cstride=4)
    else:
        axes.plot_surface(x, y, z, rstride=4, cstride=4)
    axes.set_xlabel('x')
    axes.set_ylabel('y')
    axes.set_zlabel('z')
    if keep_aspect:
        # Pad the narrower axes so all three spans are equal.
        dx, dy, dz = [cab[1] - cab[0] for cab in [xab, yab, zab]]
        maxd = max(dx, dy, dz)
        if dx < maxd:
            delta = maxd - dx
            axes.set_xlim3d(xab[0] - delta / 2.0, xab[1] + delta / 2.0)
        if dy < maxd:
            delta = maxd - dy
            axes.set_ylim3d(yab[0] - delta / 2.0, yab[1] + delta / 2.0)
        if dz < maxd:
            delta = maxd - dz
            axes.set_zlim3d(zab[0] - delta / 2.0, zab[1] + delta / 2.0)
    if fig:
        if file:
            pylab.savefig(file, dpi=dpi)
        else:
            pylab.show()
# Attach the module-level plotting functions as methods of
# VisualizationMethods so a context object exposes ctx.plot, ctx.cplot,
# ctx.splot and ctx.default_color_function.
VisualizationMethods.plot = plot
VisualizationMethods.default_color_function = default_color_function
VisualizationMethods.cplot = cplot
VisualizationMethods.splot = splot
|
bsd-3-clause
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/lib/mpl_examples/misc/font_indexing.py
|
9
|
1342
|
"""
A little example that shows how the various indexing into the font
tables relate to one another. Mainly for mpl developers....
"""
from __future__ import print_function
import matplotlib
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, KERNING_UNFITTED, KERNING_UNSCALED
#fname = '/usr/share/fonts/sfd/FreeSans.ttf'
# Use the Vera font shipped inside matplotlib's bundled data directory.
fname = matplotlib.get_data_path() + '/fonts/ttf/Vera.ttf'
font = FT2Font(fname)
# Select charmap 0: maps character codes -> glyph indices for that cmap table.
font.set_charmap(0)
codes = font.get_charmap().items()
#dsu = [(ccode, glyphind) for ccode, glyphind in codes]
#dsu.sort()
#for ccode, glyphind in dsu:
# try: name = font.get_glyph_name(glyphind)
# except RuntimeError: pass
# else: print '% 4d % 4d %s %s'%(glyphind, ccode, hex(int(ccode)), name)
# make a charname to charcode and glyphind dictionary
coded = {}
glyphd = {}
for ccode, glyphind in codes:
    name = font.get_glyph_name(glyphind)
    coded[name] = ccode
    glyphd[name] = glyphind
code = coded['A']
glyph = font.load_char(code)
#print glyph.bbox
# Show glyph indices and char codes for 'A'/'V', then kerning values under
# the three FreeType kerning modes.
print(glyphd['A'], glyphd['V'], coded['A'], coded['V'])
print('AV', font.get_kerning(glyphd['A'], glyphd['V'], KERNING_DEFAULT))
print('AV', font.get_kerning(glyphd['A'], glyphd['V'], KERNING_UNFITTED))
print('AV', font.get_kerning(glyphd['A'], glyphd['V'], KERNING_UNSCALED))
# NOTE(review): label says 'AV' but this queries the A-T pair -- looks like a
# copy/paste slip in the original example; confirm intent before relying on it.
print('AV', font.get_kerning(glyphd['A'], glyphd['T'], KERNING_UNSCALED))
|
mit
|
dynaryu/rmtk
|
rmtk/vulnerability/derivation_fragility/equivalent_linearization/miranda_2000_firm_soils/miranda_2000_firm_soils.py
|
2
|
1864
|
# -*- coding: utf-8 -*-
import os
import numpy
import math
from scipy import interpolate
from scipy import optimize
import matplotlib.pyplot as plt
from rmtk.vulnerability.common import utils
def calculate_fragility(capacity_curves,gmrs,damage_model,damping):
    """Compute a damage probability matrix (PDM) and spectral displacements.

    For every capacity curve and every ground-motion record, the inelastic
    spectral displacement is found by minimising ``calculate_Sd`` (the
    fixed-point error of the inelastic-displacement ratio C) and the result
    is allocated to a damage state.

    :param capacity_curves: dict read for keys 'Sd', 'Sdy' and 'periods'
        (one entry per capacity curve; project format -- see utils).
    :param gmrs: dict read for keys 'time' and 'acc' (one entry per record).
    :param damage_model: dict read for key 'damage_states'.
    :param damping: damping ratio forwarded to utils.NigamJennings.
    :returns: tuple ``(PDM, Sds)`` -- damage probability matrix of shape
        (no_gmrs, no_damage_states + 1) and the per-record, per-curve
        spectral displacements.
    """
    #This function returns a damage probability matrix (PDM) and the corresponding spectral displacements
    #after an iterative process to find the minimum Sd value for each case
    no_damage_states = len(damage_model['damage_states'])
    no_gmrs = len(gmrs['time'])
    no_capacity_curves = len(capacity_curves['Sd'])
    PDM = numpy.zeros((no_gmrs,no_damage_states+1))
    Sds = numpy.zeros((no_gmrs,no_capacity_curves))
    for icc in range(no_capacity_curves):
        # Progress indicator (Python 2 print statement; integer percentage).
        print str((icc+1)*100/no_capacity_curves) + '%'
        Te = capacity_curves['periods'][icc]
        Sdy = capacity_curves['Sdy'][icc]
        for igmr in range(no_gmrs):
            limit_states = utils.define_limit_states(capacity_curves,icc,damage_model)
            time = gmrs['time'][igmr]
            acc = gmrs['acc'][igmr]
            # Elastic response spectrum ordinate at the structure's period Te.
            spec_Te = utils.NigamJennings(time,acc,[Te],damping)
            # Fixed-point search started from the yield displacement Sdy.
            Sdi = optimize.fmin(calculate_Sd, Sdy, args=(spec_Te['Sd'],capacity_curves,icc), xtol=0.001, ftol=0.001, disp = False, )
            [PDM, ds] = utils.allocate_damage(igmr,PDM,Sdi,limit_states)
            Sds[igmr][icc] = Sdi
    return PDM, Sds
def calculate_Sd(old_Sd, Sd_Te, capacity_curves, icc):
    """Fixed-point error for the inelastic spectral displacement.

    Estimates the inelastic displacement from the elastic ordinate ``Sd_Te``
    via the inelastic-displacement ratio C, and returns the absolute
    difference from the current guess ``old_Sd`` (suitable for minimisation).
    """
    yield_disp = capacity_curves['Sdy'][icc]
    # Below yield the response is elastic: the target is Sd_Te itself.
    if yield_disp > Sd_Te:
        return abs(Sd_Te - old_Sd)
    ductility = old_Sd / yield_disp
    period = capacity_curves['periods'][icc]
    # Inelastic-displacement ratio C as a function of ductility and period.
    C = (1 + (1 / ductility - 1) * math.exp(-12 * period * ductility ** -0.8)) ** -1
    return abs(Sd_Te * C - old_Sd)
|
agpl-3.0
|
valexandersaulys/prudential_insurance_kaggle
|
venv/lib/python2.7/site-packages/sklearn/utils/tests/test_estimator_checks.py
|
10
|
3753
|
import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.ensemble import AdaBoostClassifier
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
    """Exception class to raise if estimator is used before fitting.

    Like NotFittedError, it inherits from ValueError, but not from
    AttributeError. Used for testing only.
    """
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
    """Deliberately broken classifier: performs no input validation at all."""
    def fit(self, X, y):
        # Intentionally skips check_X_y, so check_estimator should flag it
        # (see test_check_estimator below).
        return self
    def predict(self, X):
        # Constant prediction, no validation of X.
        return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
    """Validates input in fit but not in predict (predict is inherited)."""
    def fit(self, X, y):
        X, y = check_X_y(X, y)
        return self
class NoSparseClassifier(BaseBadClassifier):
    """Declares sparse support in fit's signature but then errors out on it."""
    def fit(self, X, y):
        X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
        if sp.issparse(X):
            # Deliberately unhelpful message for the sparse failure path.
            raise ValueError("Nonsensical Error")
        return self
    def predict(self, X):
        X = check_array(X)
        return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
    """Raises CorrectNotFittedError (a ValueError) if predict precedes fit."""
    def fit(self, X, y):
        X, y = check_X_y(X, y)
        self.coef_ = np.ones(X.shape[1])
        return self
    def predict(self, X):
        # coef_ only exists after fit, so its absence means "not fitted yet".
        if not hasattr(self, 'coef_'):
            raise CorrectNotFittedError("estimator is not fitted yet")
        X = check_array(X)
        return np.ones(X.shape[0])
def test_check_estimator():
    """check_estimator must reject each deliberately broken estimator above.

    Not a complete test of all checks (those are very extensive) -- it only
    verifies that each "bad" fixture class triggers the expected failure,
    and that a real estimator passes.
    """
    # check that we have a set_params and can clone
    msg = "it does not implement a 'get_params' methods"
    assert_raises_regex(TypeError, msg, check_estimator, object)
    # check that we have a fit method
    msg = "object has no attribute 'fit'"
    assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
    # check that fit does input validation
    msg = "TypeError not raised by fit"
    assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
    # check that predict does input validation (doesn't accept dicts in input)
    msg = "Estimator doesn't check for NaN and inf in predict"
    assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
    # check for sparse matrix input handling
    msg = "Estimator type doesn't seem to fail gracefully on sparse data"
    # the check for sparse input handling prints to the stdout,
    # instead of raising an error, so as not to remove the original traceback.
    # that means we need to jump through some hoops to catch it.
    old_stdout = sys.stdout
    string_buffer = StringIO()
    sys.stdout = string_buffer
    try:
        check_estimator(NoSparseClassifier)
    except Exception:
        # Any exception is fine here; the interesting signal is the message
        # printed to stdout, asserted below.  (Was a bare `except:`, which
        # would also have swallowed KeyboardInterrupt/SystemExit.)
        pass
    finally:
        sys.stdout = old_stdout
    assert_true(msg in string_buffer.getvalue())
    # doesn't error on actual estimator
    check_estimator(AdaBoostClassifier)
def test_check_estimators_unfitted():
    """check_estimators_unfitted accepts/rejects unfitted-predict behavior."""
    # check that a ValueError/AttributeError is raised when calling predict
    # on an unfitted estimator
    msg = "AttributeError or ValueError not raised by predict"
    assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
                        "estimator", NoSparseClassifier)
    # check that CorrectNotFittedError inherit from either ValueError
    # or AttributeError
    check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
|
gpl-2.0
|
Hiyorimi/scikit-image
|
doc/examples/segmentation/plot_join_segmentations.py
|
10
|
1998
|
"""
==========================================
Find the intersection of two segmentations
==========================================
When segmenting an image, you may want to combine multiple alternative
segmentations. The `skimage.segmentation.join_segmentations` function
computes the join of two segmentations, in which a pixel is placed in
the same segment if and only if it is in the same segment in _both_
segmentations.
"""
import numpy as np
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage.filters import sobel
from skimage.segmentation import slic, join_segmentations
from skimage.morphology import watershed
from skimage.color import label2rgb
from skimage import data, img_as_float
coins = img_as_float(data.coins())
# make segmentation using edge-detection and watershed
edges = sobel(coins)
markers = np.zeros_like(coins)
foreground, background = 1, 2
# Intensity thresholds; image is float in [0, 1], hence the /255 rescale.
markers[coins < 30.0 / 255] = background
markers[coins > 150.0 / 255] = foreground
ws = watershed(edges, markers)
# Keep only foreground basins; give each connected region its own label.
seg1 = ndi.label(ws == foreground)[0]
# make segmentation using SLIC superpixels
seg2 = slic(coins, n_segments=117, max_iter=160, sigma=1, compactness=0.75,
            multichannel=False)
# combine the two: pixels share a label iff they do in BOTH segmentations
segj = join_segmentations(seg1, seg2)
# show the segmentations side by side
fig, axes = plt.subplots(ncols=4, figsize=(9, 2.5), sharex=True, sharey=True,
                         subplot_kw={'adjustable': 'box-forced'})
axes[0].imshow(coins, cmap=plt.cm.gray, interpolation='nearest')
axes[0].set_title('Image')
color1 = label2rgb(seg1, image=coins, bg_label=0)
axes[1].imshow(color1, interpolation='nearest')
axes[1].set_title('Sobel+Watershed')
color2 = label2rgb(seg2, image=coins, image_alpha=0.5)
axes[2].imshow(color2, interpolation='nearest')
axes[2].set_title('SLIC superpixels')
color3 = label2rgb(segj, image=coins, image_alpha=0.5)
axes[3].imshow(color3, interpolation='nearest')
axes[3].set_title('Join')
for ax in axes:
    ax.axis('off')
fig.tight_layout()
plt.show()
|
bsd-3-clause
|
srowen/spark
|
python/pyspark/sql/pandas/group_ops.py
|
23
|
14683
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark.rdd import PythonEvalType
from pyspark.sql.column import Column
from pyspark.sql.dataframe import DataFrame
class PandasGroupedOpsMixin(object):
    """
    Mix-in for pandas grouped operations. Currently, only :class:`GroupedData`
    can use this class.
    """
    def apply(self, udf):
        """
        It is an alias of :meth:`pyspark.sql.GroupedData.applyInPandas`; however, it takes a
        :meth:`pyspark.sql.functions.pandas_udf` whereas
        :meth:`pyspark.sql.GroupedData.applyInPandas` takes a Python native function.
        .. versionadded:: 2.3.0
        Parameters
        ----------
        udf : :func:`pyspark.sql.functions.pandas_udf`
            a grouped map user-defined function returned by
            :func:`pyspark.sql.functions.pandas_udf`.
        Notes
        -----
        It is preferred to use :meth:`pyspark.sql.GroupedData.applyInPandas` over this
        API. This API will be deprecated in the future releases.
        Examples
        --------
        >>> from pyspark.sql.functions import pandas_udf, PandasUDFType
        >>> df = spark.createDataFrame(
        ...     [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
        ...     ("id", "v"))
        >>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)  # doctest: +SKIP
        ... def normalize(pdf):
        ...     v = pdf.v
        ...     return pdf.assign(v=(v - v.mean()) / v.std())
        >>> df.groupby("id").apply(normalize).show()  # doctest: +SKIP
        +---+-------------------+
        | id|                  v|
        +---+-------------------+
        |  1|-0.7071067811865475|
        |  1| 0.7071067811865475|
        |  2|-0.8320502943378437|
        |  2|-0.2773500981126146|
        |  2| 1.1094003924504583|
        +---+-------------------+
        See Also
        --------
        pyspark.sql.functions.pandas_udf
        """
        # Columns are special because hasattr always returns True, so the
        # isinstance check must come first to reject Column arguments.
        if isinstance(udf, Column) or not hasattr(udf, 'func') \
                or udf.evalType != PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
            raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type "
                             "GROUPED_MAP.")
        warnings.warn(
            "It is preferred to use 'applyInPandas' over this "
            "API. This API will be deprecated in the future releases. See SPARK-28264 for "
            "more details.", UserWarning)
        # Delegate to the preferred API with the wrapped native function.
        return self.applyInPandas(udf.func, schema=udf.returnType)
    def applyInPandas(self, func, schema):
        """
        Maps each group of the current :class:`DataFrame` using a pandas udf and returns the result
        as a `DataFrame`.
        The function should take a `pandas.DataFrame` and return another
        `pandas.DataFrame`. For each group, all columns are passed together as a `pandas.DataFrame`
        to the user-function and the returned `pandas.DataFrame` are combined as a
        :class:`DataFrame`.
        The `schema` should be a :class:`StructType` describing the schema of the returned
        `pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
        the field names in the defined schema if specified as strings, or match the
        field data types by position if not strings, e.g. integer indices.
        The length of the returned `pandas.DataFrame` can be arbitrary.
        .. versionadded:: 3.0.0
        Parameters
        ----------
        func : function
            a Python native function that takes a `pandas.DataFrame`, and outputs a
            `pandas.DataFrame`.
        schema : :class:`pyspark.sql.types.DataType` or str
            the return type of the `func` in PySpark. The value can be either a
            :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
        Examples
        --------
        >>> import pandas as pd  # doctest: +SKIP
        >>> from pyspark.sql.functions import pandas_udf, ceil
        >>> df = spark.createDataFrame(
        ...     [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
        ...     ("id", "v"))  # doctest: +SKIP
        >>> def normalize(pdf):
        ...     v = pdf.v
        ...     return pdf.assign(v=(v - v.mean()) / v.std())
        >>> df.groupby("id").applyInPandas(
        ...     normalize, schema="id long, v double").show()  # doctest: +SKIP
        +---+-------------------+
        | id|                  v|
        +---+-------------------+
        |  1|-0.7071067811865475|
        |  1| 0.7071067811865475|
        |  2|-0.8320502943378437|
        |  2|-0.2773500981126146|
        |  2| 1.1094003924504583|
        +---+-------------------+
        Alternatively, the user can pass a function that takes two arguments.
        In this case, the grouping key(s) will be passed as the first argument and the data will
        be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy
        data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
        as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
        This is useful when the user does not want to hardcode grouping key(s) in the function.
        >>> df = spark.createDataFrame(
        ...     [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
        ...     ("id", "v"))  # doctest: +SKIP
        >>> def mean_func(key, pdf):
        ...     # key is a tuple of one numpy.int64, which is the value
        ...     # of 'id' for the current group
        ...     return pd.DataFrame([key + (pdf.v.mean(),)])
        >>> df.groupby('id').applyInPandas(
        ...     mean_func, schema="id long, v double").show()  # doctest: +SKIP
        +---+---+
        | id|  v|
        +---+---+
        |  1|1.5|
        |  2|6.0|
        +---+---+
        >>> def sum_func(key, pdf):
        ...     # key is a tuple of two numpy.int64s, which is the values
        ...     # of 'id' and 'ceil(df.v / 2)' for the current group
        ...     return pd.DataFrame([key + (pdf.v.sum(),)])
        >>> df.groupby(df.id, ceil(df.v / 2)).applyInPandas(
        ...     sum_func, schema="id long, `ceil(v / 2)` long, v double").show()  # doctest: +SKIP
        +---+-----------+----+
        | id|ceil(v / 2)|   v|
        +---+-----------+----+
        |  2|          5|10.0|
        |  1|          1| 3.0|
        |  2|          3| 5.0|
        |  2|          2| 3.0|
        +---+-----------+----+
        Notes
        -----
        This function requires a full shuffle. All the data of a group will be loaded
        into memory, so the user should be aware of the potential OOM risk if data is skewed
        and certain groups are too large to fit in memory.
        If returning a new `pandas.DataFrame` constructed with a dictionary, it is
        recommended to explicitly index the columns by name to ensure the positions are correct,
        or alternatively use an `OrderedDict`.
        For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
        `pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
        This API is experimental.
        See Also
        --------
        pyspark.sql.functions.pandas_udf
        """
        from pyspark.sql import GroupedData
        from pyspark.sql.functions import pandas_udf, PandasUDFType
        assert isinstance(self, GroupedData)
        # Wrap the native function as a GROUPED_MAP pandas UDF and hand the
        # flat-map over groups to the JVM side.
        udf = pandas_udf(
            func, returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
        df = self._df
        udf_column = udf(*[df[col] for col in df.columns])
        jdf = self._jgd.flatMapGroupsInPandas(udf_column._jc.expr())
        return DataFrame(jdf, self.sql_ctx)
    def cogroup(self, other):
        """
        Cogroups this group with another group so that we can run cogrouped operations.
        .. versionadded:: 3.0.0
        See :class:`PandasCogroupedOps` for the operations that can be run.
        """
        from pyspark.sql import GroupedData
        assert isinstance(self, GroupedData)
        return PandasCogroupedOps(self, other)
class PandasCogroupedOps(object):
    """
    A logical grouping of two :class:`GroupedData`,
    created by :func:`GroupedData.cogroup`.
    .. versionadded:: 3.0.0
    Notes
    -----
    This API is experimental.
    """
    def __init__(self, gd1, gd2):
        # The two grouped datasets to be cogrouped; SQL context is shared.
        self._gd1 = gd1
        self._gd2 = gd2
        self.sql_ctx = gd1.sql_ctx
    def applyInPandas(self, func, schema):
        """
        Applies a function to each cogroup using pandas and returns the result
        as a `DataFrame`.
        The function should take two `pandas.DataFrame`\\s and return another
        `pandas.DataFrame`. For each side of the cogroup, all columns are passed together as a
        `pandas.DataFrame` to the user-function and the returned `pandas.DataFrame` are combined as
        a :class:`DataFrame`.
        The `schema` should be a :class:`StructType` describing the schema of the returned
        `pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
        the field names in the defined schema if specified as strings, or match the
        field data types by position if not strings, e.g. integer indices.
        The length of the returned `pandas.DataFrame` can be arbitrary.
        .. versionadded:: 3.0.0
        Parameters
        ----------
        func : function
            a Python native function that takes two `pandas.DataFrame`\\s, and
            outputs a `pandas.DataFrame`, or that takes one tuple (grouping keys) and two
            pandas ``DataFrame``\\s, and outputs a pandas ``DataFrame``.
        schema : :class:`pyspark.sql.types.DataType` or str
            the return type of the `func` in PySpark. The value can be either a
            :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
        Examples
        --------
        >>> from pyspark.sql.functions import pandas_udf
        >>> df1 = spark.createDataFrame(
        ...     [(20000101, 1, 1.0), (20000101, 2, 2.0), (20000102, 1, 3.0), (20000102, 2, 4.0)],
        ...     ("time", "id", "v1"))
        >>> df2 = spark.createDataFrame(
        ...     [(20000101, 1, "x"), (20000101, 2, "y")],
        ...     ("time", "id", "v2"))
        >>> def asof_join(l, r):
        ...     return pd.merge_asof(l, r, on="time", by="id")
        >>> df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(
        ...     asof_join, schema="time int, id int, v1 double, v2 string"
        ... ).show()  # doctest: +SKIP
        +--------+---+---+---+
        |    time| id| v1| v2|
        +--------+---+---+---+
        |20000101|  1|1.0|  x|
        |20000102|  1|3.0|  x|
        |20000101|  2|2.0|  y|
        |20000102|  2|4.0|  y|
        +--------+---+---+---+
        Alternatively, the user can define a function that takes three arguments. In this case,
        the grouping key(s) will be passed as the first argument and the data will be passed as the
        second and third arguments. The grouping key(s) will be passed as a tuple of numpy data
        types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in as two
        `pandas.DataFrame` containing all columns from the original Spark DataFrames.
        >>> def asof_join(k, l, r):
        ...     if k == (1,):
        ...         return pd.merge_asof(l, r, on="time", by="id")
        ...     else:
        ...         return pd.DataFrame(columns=['time', 'id', 'v1', 'v2'])
        >>> df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(
        ...     asof_join, "time int, id int, v1 double, v2 string").show()  # doctest: +SKIP
        +--------+---+---+---+
        |    time| id| v1| v2|
        +--------+---+---+---+
        |20000101|  1|1.0|  x|
        |20000102|  1|3.0|  x|
        +--------+---+---+---+
        Notes
        -----
        This function requires a full shuffle. All the data of a cogroup will be loaded
        into memory, so the user should be aware of the potential OOM risk if data is skewed
        and certain groups are too large to fit in memory.
        If returning a new `pandas.DataFrame` constructed with a dictionary, it is
        recommended to explicitly index the columns by name to ensure the positions are correct,
        or alternatively use an `OrderedDict`.
        For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
        `pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
        This API is experimental.
        See Also
        --------
        pyspark.sql.functions.pandas_udf
        """
        from pyspark.sql.pandas.functions import pandas_udf
        # Wrap as a COGROUPED_MAP pandas UDF over the union of both sides'
        # columns and hand the cogrouped flat-map to the JVM side.
        udf = pandas_udf(
            func, returnType=schema, functionType=PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF)
        all_cols = self._extract_cols(self._gd1) + self._extract_cols(self._gd2)
        udf_column = udf(*all_cols)
        jdf = self._gd1._jgd.flatMapCoGroupsInPandas(self._gd2._jgd, udf_column._jc.expr())
        return DataFrame(jdf, self.sql_ctx)
    @staticmethod
    def _extract_cols(gd):
        # All columns of the DataFrame underlying a GroupedData.
        df = gd._df
        return [df[col] for col in df.columns]
def _test():
    """Run this module's doctests against a local SparkSession."""
    import doctest
    from pyspark.sql import SparkSession
    import pyspark.sql.pandas.group_ops
    test_globals = pyspark.sql.pandas.group_ops.__dict__.copy()
    session = (SparkSession.builder
               .master("local[4]")
               .appName("sql.pandas.group tests")
               .getOrCreate())
    # Doctests reference a module-level `spark` session.
    test_globals['spark'] = session
    failed, _ = doctest.testmod(
        pyspark.sql.pandas.group_ops,
        globs=test_globals,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF,
    )
    session.stop()
    if failed:
        sys.exit(-1)
# Allow running the doctest suite directly: ``python group_ops.py``.
if __name__ == "__main__":
    _test()
|
apache-2.0
|
balazssimon/ml-playground
|
udemy/lazyprogrammer/deep-reinforcement-learning-python/cartpole/random_search.py
|
1
|
1282
|
import gym
import numpy as np
import matplotlib.pyplot as plt
def get_action(s, w):
    """Linear binary policy: 1 when the state-weight dot product is positive, else 0."""
    return int(s.dot(w) > 0)
def play_one_episode(env, params):
    """Run one episode under the linear policy; return its length, capped at 10000 steps."""
    obs = env.reset()
    steps = 0
    done = False
    while steps < 10000 and not done:
        # env.render()
        steps += 1
        obs, _reward, done, _info = env.step(get_action(obs, params))
    return steps
def play_multiple_episodes(env, T, params):
    """Average episode length of `params` over T episodes (also printed)."""
    lengths = np.empty(T)
    for episode in range(T):
        lengths[episode] = play_one_episode(env, params)
    mean_length = lengths.mean()
    print("avg length:", mean_length)
    return mean_length
def random_search(env):
    """Evaluate 100 random linear policies; return (per-trial averages, best params)."""
    history = []
    best_score = 0
    best_params = None
    for _trial in range(100):
        # Weights drawn uniformly from [-1, 1)^4.
        candidate = np.random.random(4) * 2 - 1
        score = play_multiple_episodes(env, 100, candidate)
        history.append(score)
        if score > best_score:
            best_params = candidate
            best_score = score
    return history, best_params
if __name__ == '__main__':
    env = gym.make('CartPole-v0')
    # Search for good weights, then inspect the per-trial learning curve.
    episode_lengths, params = random_search(env)
    plt.plot(episode_lengths)
    plt.show()
    # play a final set of episodes
    print("***Final run with final weights***")
    play_multiple_episodes(env, 100, params)
|
apache-2.0
|
GuessWhoSamFoo/pandas
|
asv_bench/benchmarks/inference.py
|
5
|
3174
|
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Series, to_numeric
from .pandas_vb_common import numeric_dtypes, lib
class NumericInferOps(object):
    # from GH 7332
    # Times elementwise arithmetic between two same-dtype columns;
    # asv parametrizes over the shared `numeric_dtypes` list.
    params = numeric_dtypes
    param_names = ['dtype']
    def setup(self, dtype):
        # 500k rows: large enough to dominate per-call overhead.
        N = 5 * 10**5
        self.df = DataFrame({'A': np.arange(N).astype(dtype),
                             'B': np.arange(N).astype(dtype)})
    def time_add(self, dtype):
        self.df['A'] + self.df['B']
    def time_subtract(self, dtype):
        self.df['A'] - self.df['B']
    def time_multiply(self, dtype):
        self.df['A'] * self.df['B']
    def time_divide(self, dtype):
        self.df['A'] / self.df['B']
    def time_modulo(self, dtype):
        self.df['A'] % self.df['B']
class DateInferOps(object):
    # from GH 7332
    def setup_cache(self):
        # setup_cache runs once; the returned frame is passed to each timer.
        N = 5 * 10**5
        df = DataFrame({'datetime64': np.arange(N).astype('datetime64[ms]')})
        # All-zero timedelta column derived from the datetimes.
        df['timedelta'] = df['datetime64'] - df['datetime64']
        return df
    def time_subtract_datetimes(self, df):
        df['datetime64'] - df['datetime64']
    def time_timedelta_plus_datetime(self, df):
        df['timedelta'] + df['datetime64']
    def time_add_timedeltas(self, df):
        df['timedelta'] + df['timedelta']
class ToNumeric(object):
    # Times pd.to_numeric under both error-handling modes.
    params = ['ignore', 'coerce']
    param_names = ['errors']
    def setup(self, errors):
        N = 10000
        self.float = Series(np.random.randn(N))
        self.numstr = self.float.astype('str')
        # Random strings (presumably non-numeric) exercising the error path.
        self.str = Series(tm.makeStringIndex(N))
    def time_from_float(self, errors):
        to_numeric(self.float, errors=errors)
    def time_from_numeric_str(self, errors):
        to_numeric(self.numstr, errors=errors)
    def time_from_str(self, errors):
        to_numeric(self.str, errors=errors)
class ToNumericDowncast(object):
    # Times to_numeric's `downcast` argument across several input flavours.
    param_names = ['dtype', 'downcast']
    params = [['string-float', 'string-int', 'string-nint', 'datetime64',
               'int-list', 'int32'],
              [None, 'integer', 'signed', 'unsigned', 'float']]
    N = 500000
    N2 = int(N / 2)
    # Mixed inputs: half strings / half ints, dates, and plain int data.
    data_dict = {'string-int': ['1'] * N2 + [2] * N2,
                 'string-nint': ['-1'] * N2 + [2] * N2,
                 'datetime64': np.repeat(np.array(['1970-01-01', '1970-01-02'],
                                                  dtype='datetime64[D]'), N),
                 'string-float': ['1.1'] * N2 + [2] * N2,
                 'int-list': [1] * N2 + [2] * N2,
                 'int32': np.repeat(np.int32(1), N)}
    def setup(self, dtype, downcast):
        self.data = self.data_dict[dtype]
    def time_downcast(self, dtype, downcast):
        to_numeric(self.data, downcast=downcast)
class MaybeConvertNumeric(object):
    # Benchmarks the low-level lib.maybe_convert_numeric on a mixed object
    # array of large uint64 values and their string representations.
    def setup_cache(self):
        N = 10**6
        # Values above 2**63 force the uint64 (non-int64) handling path.
        arr = np.repeat([2**63], N) + np.arange(N).astype('uint64')
        data = arr.astype(object)
        # Every other element as str, plus one negative to hit that branch.
        data[1::2] = arr[1::2].astype(str)
        data[-1] = -1
        return data
    def time_convert(self, data):
        lib.maybe_convert_numeric(data, set(), coerce_numeric=False)
from .pandas_vb_common import setup # noqa: F401
|
bsd-3-clause
|
DSLituiev/scikit-learn
|
sklearn/externals/joblib/__init__.py
|
31
|
4757
|
""" Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over an
over, for instance when prototyping computational-heavy jobs (as in
scientific development), but hand-crafted solution to alleviate this
issue is error-prone and often leads to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
has been ran, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
|
bsd-3-clause
|
cython-testbed/pandas
|
asv_bench/benchmarks/stat_ops.py
|
1
|
3351
|
import numpy as np
import pandas as pd
ops = ['mean', 'sum', 'median', 'std', 'skew', 'kurt', 'mad', 'prod', 'sem',
'var']
class FrameOps(object):
    # Times DataFrame reductions for each op/dtype/axis, with and without
    # the bottleneck accelerator enabled.
    goal_time = 0.2
    params = [ops, ['float', 'int'], [0, 1], [True, False]]
    param_names = ['op', 'dtype', 'axis', 'use_bottleneck']
    def setup(self, op, dtype, axis, use_bottleneck):
        df = pd.DataFrame(np.random.randn(100000, 4)).astype(dtype)
        try:
            pd.options.compute.use_bottleneck = use_bottleneck
        except TypeError:
            # Older pandas without the option: toggle nanops directly.
            from pandas.core import nanops
            nanops._USE_BOTTLENECK = use_bottleneck
        self.df_func = getattr(df, op)
    def time_op(self, op, dtype, axis, use_bottleneck):
        self.df_func(axis=axis)
class FrameMultiIndexOps(object):
    # Times level-wise DataFrame reductions over a 3-level MultiIndex.
    goal_time = 0.2
    params = ([0, 1, [0, 1]], ops)
    param_names = ['level', 'op']
    def setup(self, level, op):
        levels = [np.arange(10), np.arange(100), np.arange(100)]
        labels = [np.arange(10).repeat(10000),
                  np.tile(np.arange(100).repeat(100), 10),
                  np.tile(np.tile(np.arange(100), 100), 10)]
        # NOTE(review): `labels=` was renamed `codes=` in later pandas;
        # this benchmark targets the older constructor signature.
        index = pd.MultiIndex(levels=levels, labels=labels)
        df = pd.DataFrame(np.random.randn(len(index), 4), index=index)
        self.df_func = getattr(df, op)
    def time_op(self, level, op):
        self.df_func(level=level)
class SeriesOps(object):
    # Series counterpart of FrameOps: whole-series reductions per op/dtype,
    # with and without bottleneck.
    goal_time = 0.2
    params = [ops, ['float', 'int'], [True, False]]
    param_names = ['op', 'dtype', 'use_bottleneck']
    def setup(self, op, dtype, use_bottleneck):
        s = pd.Series(np.random.randn(100000)).astype(dtype)
        try:
            pd.options.compute.use_bottleneck = use_bottleneck
        except TypeError:
            # Older pandas without the option: toggle nanops directly.
            from pandas.core import nanops
            nanops._USE_BOTTLENECK = use_bottleneck
        self.s_func = getattr(s, op)
    def time_op(self, op, dtype, use_bottleneck):
        self.s_func()
class SeriesMultiIndexOps(object):
    # Times level-wise Series reductions over a 3-level MultiIndex.
    goal_time = 0.2
    params = ([0, 1, [0, 1]], ops)
    param_names = ['level', 'op']
    def setup(self, level, op):
        levels = [np.arange(10), np.arange(100), np.arange(100)]
        labels = [np.arange(10).repeat(10000),
                  np.tile(np.arange(100).repeat(100), 10),
                  np.tile(np.tile(np.arange(100), 100), 10)]
        # NOTE(review): `labels=` was renamed `codes=` in later pandas;
        # this benchmark targets the older constructor signature.
        index = pd.MultiIndex(levels=levels, labels=labels)
        s = pd.Series(np.random.randn(len(index)), index=index)
        self.s_func = getattr(s, op)
    def time_op(self, level, op):
        self.s_func(level=level)
class Rank(object):
    # Times .rank() on a DataFrame or Series, with and without percentile
    # output.
    goal_time = 0.2
    params = [['DataFrame', 'Series'], [True, False]]
    param_names = ['constructor', 'pct']
    def setup(self, constructor, pct):
        values = np.random.randn(10**5)
        self.data = getattr(pd, constructor)(values)
    def time_rank(self, constructor, pct):
        self.data.rank(pct=pct)
    def time_average_old(self, constructor, pct):
        # Baseline: the pre-`pct` way of computing percentile ranks.
        self.data.rank(pct=pct) / len(self.data)
class Correlation(object):
    # Times DataFrame.corr for each supported correlation method.
    goal_time = 0.2
    params = ['spearman', 'kendall', 'pearson']
    param_names = ['method']
    def setup(self, method):
        self.df = pd.DataFrame(np.random.randn(1000, 30))
    def time_corr(self, method):
        self.df.corr(method=method)
from .pandas_vb_common import setup # noqa: F401
|
bsd-3-clause
|
zzcclp/spark
|
python/pyspark/pandas/data_type_ops/binary_ops.py
|
6
|
3672
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Union, cast
from pandas.api.types import CategoricalDtype
from pyspark.pandas.base import column_op, IndexOpsMixin
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.data_type_ops.base import (
DataTypeOps,
_as_bool_type,
_as_categorical_type,
_as_other_type,
_as_string_type,
)
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import pandas_on_spark_type
from pyspark.sql import functions as F, Column
from pyspark.sql.types import BinaryType, BooleanType, StringType
class BinaryOps(DataTypeOps):
    """
    The class for binary operations of pandas-on-Spark objects with BinaryType.

    Supports byte-string concatenation (``+`` with another binary
    Series/Index or a ``bytes`` literal), ordering comparisons, and
    ``astype`` casts.
    """

    @property
    def pretty_name(self) -> str:
        # Type name interpolated into error messages below.
        return "binaries"

    def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        """Return ``left + right`` as byte-string concatenation.

        ``right`` must be a binary Series/Index or a ``bytes`` literal;
        any other operand raises ``TypeError``.
        """
        if isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BinaryType):
            return column_op(F.concat)(left, right)
        elif isinstance(right, bytes):
            return column_op(F.concat)(left, SF.lit(right))
        else:
            raise TypeError(
                "Concatenation can not be applied to %s and the given type." % self.pretty_name
            )

    def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        """Return ``right + left`` where ``right`` is a ``bytes`` literal."""
        if isinstance(right, bytes):
            return cast(
                SeriesOrIndex, left._with_new_scol(F.concat(SF.lit(right), left.spark.column))
            )
        else:
            raise TypeError(
                "Concatenation can not be applied to %s and the given type." % self.pretty_name
            )

    # The comparison operators delegate straight to the Spark Column
    # comparisons.  ``column_op`` is already imported at module level, so
    # the redundant per-method re-imports of the original are removed.
    def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        return column_op(Column.__lt__)(left, right)

    def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        return column_op(Column.__le__)(left, right)

    def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        return column_op(Column.__ge__)(left, right)

    def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
        return column_op(Column.__gt__)(left, right)

    def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
        """Cast a binary Series/Index to ``dtype``.

        Dispatches to the shared ``_as_*_type`` helpers based on the
        resolved Spark type (categorical, boolean, string, or other).
        """
        dtype, spark_type = pandas_on_spark_type(dtype)
        if isinstance(dtype, CategoricalDtype):
            return _as_categorical_type(index_ops, dtype, spark_type)
        elif isinstance(spark_type, BooleanType):
            return _as_bool_type(index_ops, dtype)
        elif isinstance(spark_type, StringType):
            return _as_string_type(index_ops, dtype)
        else:
            return _as_other_type(index_ops, dtype, spark_type)
|
apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.